diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 5ea58046f49..53e964188f6 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -3,7 +3,11 @@ GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
-in your new issue.
+in your new issue. Note that whether you're filing a bug report or a
+feature request, ensure that your submission is for an
+[OS that we support](https://www.elastic.co/support/matrix#show_os).
+Bug reports on, or feature requests specific to, an OS that we do not
+support will be closed.
-->
- Have you signed the [contributor license agreement](https://www.elastic.co/contributor-agreement)?
-- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/.github/CONTRIBUTING.md)?
+- Have you followed the [contributor guidelines](https://github.com/elastic/elasticsearch/blob/master/CONTRIBUTING.md)?
- If submitting code, have you built your changes locally prior to submission with `gradle check`?
- If submitting code, is your pull request against master? Unless there is a good reason otherwise, we prefer pull requests against master and will backport as needed.
- If submitting code, have you checked that your submission is for an [OS that we support](https://www.elastic.co/support/matrix#show_os)?
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 1a4e5b58f33..00000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: java
-jdk:
- - openjdk7
-
-env:
- - ES_TEST_LOCAL=true
- - ES_TEST_LOCAL=false
-
-notifications:
- email: false
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9afcd34fad7..b0f1e054e46 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -71,12 +71,47 @@ Once your changes and tests are ready to submit for review:
Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.
+Please adhere to the general guideline that you should never force push
+to a publicly shared branch. Once you have opened your pull request, you
+should consider your branch publicly shared. Instead of force pushing
+you can just add incremental commits; this is generally easier on your
+reviewers. If you need to pick up changes from master, you can merge
+master into your branch. A reviewer might ask you to rebase a
+long-running pull request, in which case force pushing is okay for
+that request. Note that you should not squash at the end of the review
+process either; that can be done when the pull request is [integrated
+via GitHub](https://github.com/blog/2141-squash-your-commits).
+
Contributing to the Elasticsearch codebase
------------------------------------------
**Repository:** [https://github.com/elastic/elasticsearch](https://github.com/elastic/elasticsearch)
-Make sure you have [Gradle](http://gradle.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE: `gradle eclipse` then `File: Import: Existing Projects into Workspace`. Select the option `Search for nested projects`. Additionally you will want to ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini` accordingly to avoid GC overhead errors.
+Make sure you have [Gradle](http://gradle.org) installed, as
+Elasticsearch uses it as its build system.
+
+Eclipse users can automatically configure their IDE: `gradle eclipse`
+then `File: Import: Existing Projects into Workspace`. Select the
+option `Search for nested projects`. Additionally, you will want to
+ensure that Eclipse is using 2048m of heap by modifying `eclipse.ini`
+accordingly to avoid GC overhead errors.
+
+IntelliJ users can automatically configure their IDE: `gradle idea`
+then `File->New Project From Existing Sources`. Point to the root of
+the source directory, select
+`Import project from external model->Gradle`, enable
+`Use auto-import`.
+
+The Elasticsearch codebase makes heavy use of Java `assert`s and the
+test runner requires that assertions be enabled within the JVM. This
+can be accomplished by passing the flag `-ea` to the JVM on startup.
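+
+As a quick illustration (a hypothetical class, not part of the
+codebase), the `assert` below fires only when the JVM is started with
+`-ea`:
+
+```java
+public class AssertDemo {
+    public static void main(String[] args) {
+        // With `java -ea AssertDemo` this throws AssertionError;
+        // without `-ea` (the default) the assert is skipped entirely.
+        assert false : "assertions are enabled";
+        System.out.println("assertions are disabled");
+    }
+}
+```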
+
+For IntelliJ, go to
+`Run->Edit Configurations...->Defaults->JUnit->VM options` and input
+`-ea`.
+
+For Eclipse, go to `Preferences->Java->Installed JREs` and add `-ea` to
+`VM Arguments`.
Please follow these formatting guidelines:
diff --git a/README.textile b/README.textile
index 5c75844b108..69d3fd54767 100644
--- a/README.textile
+++ b/README.textile
@@ -50,19 +50,19 @@ h3. Indexing
Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/twitter/user/kimchy?pretty' -d '{ "name" : "Shay Banon" }'
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/1?pretty' -d '
{
"user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
+ "post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
+curl -XPUT 'http://localhost:9200/twitter/tweet/2?pretty' -d '
{
"user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
+ "post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
@@ -101,7 +101,7 @@ Just for kicks, let's get all the documents stored (we should see the user as we
curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
{
"query" : {
- "matchAll" : {}
+ "match_all" : {}
}
}'
@@ -113,7 +113,7 @@ curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
{
"query" : {
"range" : {
- "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
+ "post_date" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
}
}
}'
@@ -130,19 +130,19 @@ Elasticsearch supports multiple indices, as well as multiple types per index. In
Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
+curl -XPUT 'http://localhost:9200/kimchy/info/1?pretty' -d '{ "name" : "Shay Banon" }'
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/1?pretty' -d '
{
"user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
+ "post_date": "2009-11-15T13:12:00",
"message": "Trying out Elasticsearch, so far so good?"
}'
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
+curl -XPUT 'http://localhost:9200/kimchy/tweet/2?pretty' -d '
{
"user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
+ "post_date": "2009-11-15T14:12:12",
"message": "Another tweet, will it be indexed?"
}'
@@ -152,11 +152,11 @@ The above will index information into the @kimchy@ index, with two types, @info@
Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
-curl -XPUT http://localhost:9200/another_user/ -d '
+curl -XPUT http://localhost:9200/another_user?pretty -d '
{
"index" : {
- "numberOfShards" : 1,
- "numberOfReplicas" : 1
+ "number_of_shards" : 1,
+ "number_of_replicas" : 1
}
}'
@@ -168,7 +168,7 @@ index (twitter user), for example:
curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
{
"query" : {
- "matchAll" : {}
+ "match_all" : {}
}
}'
@@ -179,7 +179,7 @@ Or on all the indices:
curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
{
"query" : {
- "matchAll" : {}
+ "match_all" : {}
}
}'
@@ -196,15 +196,15 @@ In order to play with the distributed nature of Elasticsearch, simply bring more
h3. Where to go from here?
-We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
+We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website. General questions can be asked on the "Elastic Discourse forum":https://discuss.elastic.co or on IRC on Freenode at "#elasticsearch":https://webchat.freenode.net/#elasticsearch. The Elasticsearch GitHub repository is reserved for bug reports and feature requests only.
h3. Building from Source
-Elasticsearch uses "Gradle":http://gradle.org for its build system. You'll need to have a modern version of Gradle installed - 2.8 should do.
+Elasticsearch uses "Gradle":https://gradle.org for its build system. You'll need to have a modern version of Gradle installed - 2.13 should do.
-In order to create a distribution, simply run the @gradle build@ command in the cloned directory.
+In order to create a distribution, simply run the @gradle assemble@ command in the cloned directory.
-The distribution for each project will be created under the @target/releases@ directory in that project.
+The distribution for each project will be created under the @build/distributions@ directory in that project.
See the "TESTING":TESTING.asciidoc file for more information about
running the Elasticsearch test suite.
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 5eea0b8c163..44eda08020a 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -18,24 +18,18 @@ gradle assemble
== Other test options
-To disable and enable network transport, set the `Des.node.mode`.
+To enable or disable the network transport, set the `tests.es.node.mode` system property.
Use network transport:
------------------------------------
--Des.node.mode=network
+-Dtests.es.node.mode=network
------------------------------------
Use local transport (default since 1.3):
-------------------------------------
--Des.node.mode=local
--------------------------------------
-
-Alternatively, you can set the `ES_TEST_LOCAL` environment variable:
-
--------------------------------------
-export ES_TEST_LOCAL=true && gradle test
+-Dtests.es.node.mode=local
-------------------------------------
=== Running Elasticsearch from a checkout
@@ -201,7 +195,7 @@ gradle test -Dtests.timeoutSuite=5000! ...
Change the logging level of ES (not gradle)
--------------------------------
-gradle test -Des.logger.level=DEBUG
+gradle test -Dtests.es.logger.level=DEBUG
--------------------------------
Print all the logging output from the test runs to the commandline
@@ -302,7 +296,7 @@ gradle :distribution:integ-test-zip:integTest \
-Dtests.method="test {p0=cat.shards/10_basic/Help}"
---------------------------------------------------------------------------
-`RestNIT` are the executable test classes that runs all the
+`RestIT` are the executable test classes that run all the
yaml suites available within the `rest-api-spec` folder.
The REST tests support all the options provided by the randomized runner, plus the following:
diff --git a/Vagrantfile b/Vagrantfile
index 4f8ee7164f6..423b50038e0 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
# debian and it works fine.
config.vm.define "debian-8" do |config|
config.vm.box = "elastic/debian-8-x86_64"
- deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
+ deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
end
config.vm.define "centos-6" do |config|
config.vm.box = "elastic/centos-6-x86_64"
@@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/oraclelinux-7-x86_64"
rpm_common config
end
- config.vm.define "fedora-22" do |config|
- config.vm.box = "elastic/fedora-22-x86_64"
+ config.vm.define "fedora-24" do |config|
+ config.vm.box = "elastic/fedora-24-x86_64"
dnf_common config
end
config.vm.define "opensuse-13" do |config|
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 00000000000..03aaac7f3c4
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,62 @@
+# Elasticsearch Microbenchmark Suite
+
+This directory contains the microbenchmark suite of Elasticsearch. It relies on [JMH](http://openjdk.java.net/projects/code-tools/jmh/).
+
+## Purpose
+
+We do not want to microbenchmark everything under the sun; we should typically rely on our
+[macrobenchmarks](https://elasticsearch-benchmarks.elastic.co/app/kibana#/dashboard/Nightly-Benchmark-Overview) with
+[Rally](http://github.com/elastic/rally). Microbenchmarks are intended to spot performance regressions in performance-critical components.
+The microbenchmark suite is also handy for ad-hoc microbenchmarks, but please remove them again before merging your PR.
+
+## Getting Started
+
+Just run `gradle :benchmarks:jmh` from the project root directory. It will build all microbenchmarks, execute them and print the result.
+
+## Running Microbenchmarks
+
+Benchmarks are always run via Gradle with `gradle :benchmarks:jmh`.
+
+Running via an IDE is not supported as the results are meaningless (we have no control over the JVM running the benchmarks).
+
+If you want to run a specific benchmark class, e.g. `org.elasticsearch.benchmark.MySampleBenchmark`, or have special requirements,
+generate the uberjar with `gradle :benchmarks:jmhJar` and run it directly with:
+
+```
+java -jar benchmarks/build/distributions/elasticsearch-benchmarks-*.jar
+```
+
+JMH supports lots of command line parameters. Add `-h` to the command above to see the available command line options.
+
+## Adding Microbenchmarks
+
+Before adding a new microbenchmark, make yourself familiar with the JMH API. You can check our existing microbenchmarks and also the
+[JMH samples](http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/).
+
+In contrast to tests, the actual name of the benchmark class is not relevant to JMH. However, stick to the naming convention and
+end the class name of a benchmark with `Benchmark`. To have JMH execute a benchmark, annotate the respective methods with `@Benchmark`.
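+
+As a minimal sketch of such a class (the package, class name, and
+workload here are made up for illustration), this measures string
+concatenation over a varying input size:
+
+```java
+package org.elasticsearch.benchmark.sample;
+
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+
+@State(Scope.Benchmark)
+public class StringJoinBenchmark { // note the `Benchmark` suffix
+
+    @Param({"10", "100"}) // vary the problem input size
+    public int size;
+
+    private String[] parts;
+
+    @Setup
+    public void setUp() {
+        parts = new String[size];
+        for (int i = 0; i < size; i++) {
+            parts[i] = Integer.toString(i);
+        }
+    }
+
+    @Benchmark
+    public String join() {
+        StringBuilder sb = new StringBuilder();
+        for (String part : parts) {
+            sb.append(part);
+        }
+        // return the result so the JIT cannot eliminate the computation
+        return sb.toString();
+    }
+}
+```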
+
+## Tips and Best Practices
+
+To get realistic results, you should exercise care when running benchmarks. Here are a few tips:
+
+### Do
+
+* Ensure that the system executing your microbenchmarks has as little load as possible. Shut down every process that can cause unnecessary
+ runtime jitter. Watch the `Error` column in the benchmark results to see the run-to-run variance.
+* Run enough warmup iterations to get the benchmark into a stable state. If you are unsure, don't change the defaults.
+* Avoid CPU migrations by pinning your benchmarks to specific CPU cores. On Linux you can use `taskset`.
+* Fix the CPU frequency to prevent Turbo Boost from kicking in and skewing your results. On Linux you can use `cpufreq-set` and the
+ `performance` CPU governor.
+* Vary the problem input size with `@Param`.
+* Use the integrated profilers in JMH to dig deeper if benchmark results do not match your hypotheses:
+ * Run the generated uberjar directly and use `-prof gc` to check whether the garbage collector runs during a microbenchmark and skews
+ your results. If so, try to force a GC between runs (`-gc true`) but watch out for the caveats.
+ * Use `-prof perf` or `-prof perfasm` (both only available on Linux) to see hotspots.
+* Have your benchmarks peer-reviewed.
+
+### Don't
+
+* Blindly believe the numbers that your microbenchmark produces; verify them instead by measuring, e.g. with `-prof perfasm`.
+* Run more threads than your number of CPU cores (in case you run multi-threaded microbenchmarks).
+* Look only at the `Score` column and ignore `Error`. Instead, take countermeasures to keep `Error` low and the variance explainable.
\ No newline at end of file
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
new file mode 100644
index 00000000000..186fdca44ea
--- /dev/null
+++ b/benchmarks/build.gradle
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+buildscript {
+ repositories {
+ maven {
+ url 'https://plugins.gradle.org/m2/'
+ }
+ }
+ dependencies {
+ classpath 'com.github.jengelman.gradle.plugins:shadow:1.2.3'
+ }
+}
+
+apply plugin: 'elasticsearch.build'
+// build an uberjar with all benchmarks
+apply plugin: 'com.github.johnrengelman.shadow'
+// have the shadow plugin provide the runShadow task
+apply plugin: 'application'
+
+archivesBaseName = 'elasticsearch-benchmarks'
+mainClassName = 'org.openjdk.jmh.Main'
+
+// never try to invoke tests on the benchmark project - there aren't any
+check.dependsOn.remove(test)
+// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
+task test(type: Test, overwrite: true)
+
+dependencies {
+ compile("org.elasticsearch:elasticsearch:${version}") {
+ // JMH ships with the conflicting version 4.6 (JMH will not update this dependency as it is Java 6 compatible and jopt-simple 4.6 is
+ // the most recent compatible version). This prevents us from using jopt-simple in benchmarks (which should be ok) but allows us
+ // to invoke the JMH uberjar as usual.
+ exclude group: 'net.sf.jopt-simple', module: 'jopt-simple'
+ }
+ compile "org.openjdk.jmh:jmh-core:$versions.jmh"
+ compile "org.openjdk.jmh:jmh-generator-annprocess:$versions.jmh"
+ // Dependencies of JMH
+ runtime 'net.sf.jopt-simple:jopt-simple:4.6'
+ runtime 'org.apache.commons:commons-math3:3.2'
+}
+
+compileJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+
+forbiddenApis {
+ // classes generated by JMH can use all sorts of forbidden APIs but we have no influence at all and cannot exclude these classes
+ ignoreFailures = true
+}
+
+// No licenses for our benchmark deps (we don't ship benchmarks)
+dependencyLicenses.enabled = false
+
+thirdPartyAudit.excludes = [
+ // these classes intentionally use JDK internal API (and this is ok since the project is maintained by Oracle employees)
+ 'org.openjdk.jmh.profile.AbstractHotspotProfiler',
+ 'org.openjdk.jmh.profile.HotspotThreadProfiler',
+ 'org.openjdk.jmh.profile.HotspotClassloadingProfiler',
+ 'org.openjdk.jmh.profile.HotspotCompilationProfiler',
+ 'org.openjdk.jmh.profile.HotspotMemoryProfiler',
+ 'org.openjdk.jmh.profile.HotspotRuntimeProfiler',
+ 'org.openjdk.jmh.util.Utils'
+]
+
+shadowJar {
+ classifier = 'benchmarks'
+}
+
+// alias the shadowJar and runShadow tasks to abstract from the concrete plugin that we are using and provide a more consistent interface
+task jmhJar(
+ dependsOn: shadowJar,
+ description: 'Generates an uberjar with the microbenchmarks and all dependencies',
+ group: 'Benchmark'
+)
+
+task jmh(
+ dependsOn: runShadow,
+ description: 'Runs all microbenchmarks',
+ group: 'Benchmark'
+)
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
new file mode 100644
index 00000000000..5e5f35f6040
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java
@@ -0,0 +1,171 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.Settings;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Warmup;
+
+import java.util.Collections;
+import java.util.concurrent.TimeUnit;
+
+@Fork(3)
+@Warmup(iterations = 10)
+@Measurement(iterations = 10)
+@BenchmarkMode(Mode.AverageTime)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@State(Scope.Benchmark)
+@SuppressWarnings("unused") //invoked by benchmarking framework
+public class AllocationBenchmark {
+ // Do NOT make any field final (even if it is not annotated with @Param)! See also
+ // http://hg.openjdk.java.net/code-tools/jmh/file/tip/jmh-samples/src/main/java/org/openjdk/jmh/samples/JMHSample_10_ConstantFold.java
+
+ // we cannot use individual @Params as some will lead to invalid combinations which do not let the benchmark terminate. JMH offers no
+ // support for constraining the combinations of benchmark parameters, and we do not want to rely on OptionsBuilder, as each benchmark
+ // would need its own main method and we cannot execute more than one class with a main method per JAR.
+ @Param({
+ // indices, shards, replicas, nodes
+ " 10, 1, 0, 1",
+ " 10, 3, 0, 1",
+ " 10, 10, 0, 1",
+ " 100, 1, 0, 1",
+ " 100, 3, 0, 1",
+ " 100, 10, 0, 1",
+
+ " 10, 1, 0, 10",
+ " 10, 3, 0, 10",
+ " 10, 10, 0, 10",
+ " 100, 1, 0, 10",
+ " 100, 3, 0, 10",
+ " 100, 10, 0, 10",
+
+ " 10, 1, 1, 10",
+ " 10, 3, 1, 10",
+ " 10, 10, 1, 10",
+ " 100, 1, 1, 10",
+ " 100, 3, 1, 10",
+ " 100, 10, 1, 10",
+
+ " 10, 1, 2, 10",
+ " 10, 3, 2, 10",
+ " 10, 10, 2, 10",
+ " 100, 1, 2, 10",
+ " 100, 3, 2, 10",
+ " 100, 10, 2, 10",
+
+ " 10, 1, 0, 50",
+ " 10, 3, 0, 50",
+ " 10, 10, 0, 50",
+ " 100, 1, 0, 50",
+ " 100, 3, 0, 50",
+ " 100, 10, 0, 50",
+
+ " 10, 1, 1, 50",
+ " 10, 3, 1, 50",
+ " 10, 10, 1, 50",
+ " 100, 1, 1, 50",
+ " 100, 3, 1, 50",
+ " 100, 10, 1, 50",
+
+ " 10, 1, 2, 50",
+ " 10, 3, 2, 50",
+ " 10, 10, 2, 50",
+ " 100, 1, 2, 50",
+ " 100, 3, 2, 50",
+ " 100, 10, 2, 50"
+ })
+ public String indicesShardsReplicasNodes = "10,1,0,1";
+
+ public int numTags = 2;
+
+ private AllocationService strategy;
+ private ClusterState initialClusterState;
+
+ @Setup
+ public void setUp() throws Exception {
+ final String[] params = indicesShardsReplicasNodes.split(",");
+
+ int numIndices = toInt(params[0]);
+ int numShards = toInt(params[1]);
+ int numReplicas = toInt(params[2]);
+ int numNodes = toInt(params[3]);
+
+ strategy = Allocators.createAllocationService(Settings.builder()
+ .put("cluster.routing.allocation.awareness.attributes", "tag")
+ .build());
+
+ MetaData.Builder mb = MetaData.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ mb.put(IndexMetaData.builder("test_" + i)
+ .settings(Settings.builder().put("index.version.created", Version.CURRENT))
+ .numberOfShards(numShards)
+ .numberOfReplicas(numReplicas)
+ );
+ }
+ MetaData metaData = mb.build();
+ RoutingTable.Builder rb = RoutingTable.builder();
+ for (int i = 1; i <= numIndices; i++) {
+ rb.addAsNew(metaData.index("test_" + i));
+ }
+ RoutingTable routingTable = rb.build();
+ DiscoveryNodes.Builder nb = DiscoveryNodes.builder();
+ for (int i = 1; i <= numNodes; i++) {
+ nb.put(Allocators.newNode("node" + i, Collections.singletonMap("tag", "tag_" + (i % numTags))));
+ }
+ initialClusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
+ .metaData(metaData).routingTable(routingTable).nodes(nb).build();
+ }
+
+ private int toInt(String v) {
+ return Integer.valueOf(v.trim());
+ }
+
+ @Benchmark
+ public ClusterState measureAllocation() {
+ ClusterState clusterState = initialClusterState;
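+ // keep applying started shards and rerouting until every shard has been assigned to a node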
+ while (clusterState.getRoutingNodes().hasUnassignedShards()) {
+ RoutingAllocation.Result result = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes()
+ .shardsWithState(ShardRoutingState.INITIALIZING));
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ result = strategy.reroute(clusterState, "reroute");
+ clusterState = ClusterState.builder(clusterState).routingResult(result).build();
+ }
+ return clusterState;
+ }
+}
diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
new file mode 100644
index 00000000000..97fbda80dc6
--- /dev/null
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.benchmark.routing.allocation;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
+import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
+import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.gateway.GatewayAllocator;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public final class Allocators {
+ private static class NoopGatewayAllocator extends GatewayAllocator {
+ public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();
+
+ protected NoopGatewayAllocator() {
+ super(Settings.EMPTY, null, null);
+ }
+
+ @Override
+ public void applyStartedShards(StartedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public void applyFailedShards(FailedRerouteAllocation allocation) {
+ // noop
+ }
+
+ @Override
+ public boolean allocateUnassigned(RoutingAllocation allocation) {
+ return false;
+ }
+ }
+
+ private Allocators() {
+ throw new AssertionError("Do not instantiate");
+ }
+
+
+ public static AllocationService createAllocationService(Settings settings) throws NoSuchMethodException, InstantiationException,
+ IllegalAccessException, InvocationTargetException {
+ return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings
+ .BUILT_IN_CLUSTER_SETTINGS));
+ }
+
+ public static AllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings) throws
+ InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException {
+ return new AllocationService(settings,
+ defaultAllocationDeciders(settings, clusterSettings),
+ NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE);
+ }
+
+ public static AllocationDeciders defaultAllocationDeciders(Settings settings, ClusterSettings clusterSettings) throws
+ IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException {
+ List<AllocationDecider> list = new ArrayList<>();
+ // Keep a deterministic order of allocation deciders for the benchmark
+ for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
+ try {
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, ClusterSettings
+ .class);
+ list.add(constructor.newInstance(settings, clusterSettings));
+ } catch (NoSuchMethodException e) {
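+ // fall back to the constructor that only takes Settings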
+ Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class);
+ list.add(constructor.newInstance(settings));
+ }
+ }
+ return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
+
+ }
+
+ public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
+ return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER,
+ DiscoveryNode.Role.DATA), Version.CURRENT);
+ }
+}
diff --git a/benchmarks/src/main/resources/log4j.properties b/benchmarks/src/main/resources/log4j.properties
new file mode 100644
index 00000000000..8ca1bc87295
--- /dev/null
+++ b/benchmarks/src/main/resources/log4j.properties
@@ -0,0 +1,8 @@
+# Do not log at all if it is not really critical - we're in a benchmark
+benchmarks.es.logger.level=ERROR
+log4j.rootLogger=${benchmarks.es.logger.level}, out
+
+log4j.appender.out=org.apache.log4j.ConsoleAppender
+log4j.appender.out.layout=org.apache.log4j.PatternLayout
+log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n
+
diff --git a/build.gradle b/build.gradle
index ad1f2456dea..540f27503f0 100644
--- a/build.gradle
+++ b/build.gradle
@@ -27,6 +27,31 @@ import org.apache.tools.ant.taskdefs.condition.Os
subprojects {
group = 'org.elasticsearch'
version = org.elasticsearch.gradle.VersionProperties.elasticsearch
+ description = "Elasticsearch subproject ${project.path}"
+
+ // we only use maven publish to add tasks for pom generation
+ plugins.withType(MavenPublishPlugin).whenPluginAdded {
+ publishing {
+ publications {
+ // add license information to generated poms
+ all {
+ pom.withXml { XmlProvider xml ->
+ Node node = xml.asNode()
+ node.appendNode('inceptionYear', '2009')
+
+ Node license = node.appendNode('licenses').appendNode('license')
+ license.appendNode('name', 'The Apache Software License, Version 2.0')
+ license.appendNode('url', 'http://www.apache.org/licenses/LICENSE-2.0.txt')
+ license.appendNode('distribution', 'repo')
+
+ Node developer = node.appendNode('developers').appendNode('developer')
+ developer.appendNode('name', 'Elastic')
+ developer.appendNode('url', 'http://www.elastic.co')
+ }
+ }
+ }
+ }
+ }
plugins.withType(NexusPlugin).whenPluginAdded {
modifyPom {
@@ -56,7 +81,7 @@ subprojects {
nexus {
String buildSnapshot = System.getProperty('build.snapshot', 'true')
if (buildSnapshot == 'false') {
- Repository repo = new RepositoryBuilder().findGitDir(new File('.')).build()
+ Repository repo = new RepositoryBuilder().findGitDir(project.rootDir).build()
String shortHash = repo.resolve('HEAD')?.name?.substring(0,7)
repositoryUrl = project.hasProperty('build.repository') ? project.property('build.repository') : "file://${System.getenv('HOME')}/elasticsearch-releases/${version}-${shortHash}/"
}
@@ -119,6 +144,14 @@ subprojects {
// see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
javadoc.options.encoding='UTF8'
javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
+ /*
+ TODO: building javadocs with java 9 b118 is currently broken with weird errors, so
+ for now this is commented out...try again with the next ea build...
+ javadoc.executable = new File(project.javaHome, 'bin/javadoc')
+ if (project.javaVersion == JavaVersion.VERSION_1_9) {
+ // TODO: remove this hack! gradle should be passing this...
+ javadoc.options.addStringOption('source', '8')
+ }*/
}
}
@@ -127,8 +160,12 @@ subprojects {
them as external dependencies so the build plugin that we use can be used
to build elasticsearch plugins outside of the elasticsearch source tree. */
ext.projectSubstitutions = [
+ "org.elasticsearch.gradle:build-tools:${version}": ':build-tools',
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
+ "org.elasticsearch.client:rest:${version}": ':client:rest',
+ "org.elasticsearch.client:sniffer:${version}": ':client:sniffer',
+ "org.elasticsearch.client:test:${version}": ':client:test',
"org.elasticsearch.test:framework:${version}": ':test:framework',
"org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
"org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
@@ -224,7 +261,6 @@ allprojects {
idea {
project {
- languageLevel = org.elasticsearch.gradle.BuildPlugin.minimumJava.toString()
vcs = 'Git'
}
}
@@ -236,13 +272,6 @@ tasks.idea.doLast {
if (System.getProperty('idea.active') != null && ideaMarker.exists() == false) {
throw new GradleException('You must run gradle idea from the root of elasticsearch before importing into IntelliJ')
}
-// add buildSrc itself as a groovy project
-task buildSrcIdea(type: GradleBuild) {
- buildFile = 'buildSrc/build.gradle'
- tasks = ['cleanIdea', 'ideaModule']
-}
-tasks.idea.dependsOn(buildSrcIdea)
-
// eclipse configuration
allprojects {
@@ -278,20 +307,14 @@ allprojects {
into '.settings'
}
// otherwise .settings is not nuked entirely
- tasks.cleanEclipse {
+ task wipeEclipseSettings(type: Delete) {
delete '.settings'
}
+ tasks.cleanEclipse.dependsOn(wipeEclipseSettings)
// otherwise the eclipse merging is *super confusing*
tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
}
-// add buildSrc itself as a groovy project
-task buildSrcEclipse(type: GradleBuild) {
- buildFile = 'buildSrc/build.gradle'
- tasks = ['cleanEclipse', 'eclipse']
-}
-tasks.eclipse.dependsOn(buildSrcEclipse)
-
// we need to add the same --debug-jvm option as
// the real RunTask has, so we can pass it through
class Run extends DefaultTask {
diff --git a/buildSrc/.gitignore b/buildSrc/.gitignore
new file mode 100644
index 00000000000..bfdaf60b97e
--- /dev/null
+++ b/buildSrc/.gitignore
@@ -0,0 +1 @@
+build-bootstrap/
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index e36451311e7..1be5020f4f8 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -1,5 +1,3 @@
-import java.nio.file.Files
-
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
@@ -19,25 +17,31 @@ import java.nio.file.Files
* under the License.
*/
-// we must use buildscript + apply so that an external plugin
-// can apply this file, since the plugins directive is not
-// supported through file includes
-buildscript {
- repositories {
- jcenter()
- }
- dependencies {
- classpath 'com.bmuschko:gradle-nexus-plugin:2.3.1'
- }
-}
+import java.nio.file.Files
+
apply plugin: 'groovy'
-apply plugin: 'com.bmuschko.nexus'
-// TODO: move common IDE configuration to a common file to include
-apply plugin: 'idea'
-apply plugin: 'eclipse'
group = 'org.elasticsearch.gradle'
-archivesBaseName = 'build-tools'
+
+// TODO: remove this when upgrading to a version that supports ProgressLogger
+// gradle 2.14 made internal apis unavailable to plugins, and gradle considered
+// ProgressLogger to be an internal api. Until this is made available again,
+// we can't upgrade without losing our nice progress logging
+// NOTE that this check duplicates that in BuildPlugin, but we need to check
+// early here before trying to compile the broken classes in buildSrc
+if (GradleVersion.current() != GradleVersion.version('2.13')) {
+ throw new GradleException('Gradle 2.13 is required to build elasticsearch')
+}
+
+if (project == rootProject) {
+ // change the build dir used during build init, so that doing a clean
+ // won't wipe out the buildscript jar
+ buildDir = 'build-bootstrap'
+}
+
+/*****************************************************************************
+ * Propagating version.properties to the rest of the build *
+ *****************************************************************************/
Properties props = new Properties()
props.load(project.file('version.properties').newDataInputStream())
@@ -51,32 +55,6 @@ if (snapshot) {
props.put("elasticsearch", version);
}
-
-repositories {
- mavenCentral()
- maven {
- name 'sonatype-snapshots'
- url "https://oss.sonatype.org/content/repositories/snapshots/"
- }
- jcenter()
-}
-
-dependencies {
- compile gradleApi()
- compile localGroovy()
- compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}"
- compile("junit:junit:${props.getProperty('junit')}") {
- transitive = false
- }
- compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
- compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
- compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
- compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
- compile 'de.thetaphi:forbiddenapis:2.0'
- compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
- compile 'org.apache.rat:apache-rat:0.11'
-}
-
File tempPropertiesFile = new File(project.buildDir, "version.properties")
task writeVersionProperties {
inputs.properties(props)
@@ -96,31 +74,84 @@ processResources {
from tempPropertiesFile
}
-extraArchive {
- javadoc = false
- tests = false
+/*****************************************************************************
+ * Dependencies used by the entire build *
+ *****************************************************************************/
+
+repositories {
+ jcenter()
}
-idea {
- module {
- inheritOutputDirs = false
- outputDir = file('build-idea/classes/main')
- testOutputDir = file('build-idea/classes/test')
+dependencies {
+ compile gradleApi()
+ compile localGroovy()
+ compile "com.carrotsearch.randomizedtesting:junit4-ant:${props.getProperty('randomizedrunner')}"
+ compile("junit:junit:${props.getProperty('junit')}") {
+ transitive = false
+ }
+ compile 'com.netflix.nebula:gradle-extra-configurations-plugin:3.0.3'
+ compile 'com.netflix.nebula:nebula-publishing-plugin:4.4.4'
+ compile 'com.netflix.nebula:gradle-info-plugin:3.0.3'
+ compile 'org.eclipse.jgit:org.eclipse.jgit:3.2.0.201312181205-r'
+ compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
+ compile 'de.thetaphi:forbiddenapis:2.2'
+ compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
+ compile 'org.apache.rat:apache-rat:0.11'
+ compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1'
+}
+
+
+/*****************************************************************************
+ * Bootstrap repositories *
+ *****************************************************************************/
+// this will only happen when buildSrc is built on its own during build init
+if (project == rootProject) {
+
+ repositories {
+ mavenCentral()
+ maven {
+ name 'sonatype-snapshots'
+ url "https://oss.sonatype.org/content/repositories/snapshots/"
+ }
+ }
+ test.exclude 'org/elasticsearch/test/NamingConventionsCheckBadClasses*'
+}
+
+/*****************************************************************************
+ * Normal project checks *
+ *****************************************************************************/
+
+// this happens when included as a normal project in the build, which we do
+// to enforce precommit checks like forbidden apis, as well as setup publishing
+if (project != rootProject) {
+ apply plugin: 'elasticsearch.build'
+ apply plugin: 'nebula.maven-base-publish'
+ apply plugin: 'nebula.maven-scm'
+
+ // groovydoc succeeds, but has some weird internal exception...
+ groovydoc.enabled = false
+
+ // build-tools is not ready for primetime with these...
+ dependencyLicenses.enabled = false
+ forbiddenApisMain.enabled = false
+ forbiddenApisTest.enabled = false
+ jarHell.enabled = false
+ thirdPartyAudit.enabled = false
+
+ // test for elasticsearch.build tries to run with ES...
+ test.enabled = false
+
+ // TODO: re-enable once randomizedtesting gradle code is published and removed from here
+ licenseHeaders.enabled = false
+
+ forbiddenPatterns {
+ exclude '**/*.wav'
+ // the file that actually defines nocommit
+ exclude '**/ForbiddenPatternsTask.groovy'
+ }
+
+ namingConventions {
+ testClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$UnitTestCase'
+ integTestClass = 'org.elasticsearch.test.NamingConventionsCheckBadClasses$IntegTestCase'
}
}
-
-eclipse {
- classpath {
- defaultOutputDir = file('build-eclipse')
- }
-}
-
-task copyEclipseSettings(type: Copy) {
- from project.file('src/main/resources/eclipse.settings')
- into '.settings'
-}
-// otherwise .settings is not nuked entirely
-tasks.cleanEclipse {
- delete '.settings'
-}
-tasks.eclipse.dependsOn(cleanEclipse, copyEclipseSettings)
diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy
index 450d3645182..6ed6ecf8619 100644
--- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy
+++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/TestReportLogger.groovy
@@ -28,12 +28,6 @@ import org.gradle.api.logging.LogLevel
import org.gradle.api.logging.Logger
import org.junit.runner.Description
-import javax.sound.sampled.AudioSystem
-import javax.sound.sampled.Clip
-import javax.sound.sampled.Line
-import javax.sound.sampled.LineEvent
-import javax.sound.sampled.LineListener
-import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.atomic.AtomicInteger
import static com.carrotsearch.ant.tasks.junit4.FormattingUtils.formatDescription
@@ -123,36 +117,9 @@ class TestReportLogger extends TestsSummaryEventListener implements AggregatedEv
formatTime(e.getCurrentTime()) + ", stalled for " +
formatDurationInSeconds(e.getNoEventDuration()) + " at: " +
(e.getDescription() == null ? "" : formatDescription(e.getDescription())))
- try {
- playBeat();
- } catch (Exception nosound) { /* handling exceptions with style */ }
slowTestsFound = true
}
- void playBeat() throws Exception {
- Clip clip = (Clip)AudioSystem.getLine(new Line.Info(Clip.class));
- final AtomicBoolean stop = new AtomicBoolean();
- clip.addLineListener(new LineListener() {
- @Override
- public void update(LineEvent event) {
- if (event.getType() == LineEvent.Type.STOP) {
- stop.set(true);
- }
- }
- });
- InputStream stream = getClass().getResourceAsStream("/beat.wav");
- try {
- clip.open(AudioSystem.getAudioInputStream(stream));
- clip.start();
- while (!stop.get()) {
- Thread.sleep(20);
- }
- clip.close();
- } finally {
- stream.close();
- }
- }
-
@Subscribe
void onQuit(AggregatedQuitEvent e) throws IOException {
if (config.showNumFailuresAtEnd > 0 && !failedTests.isEmpty()) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
index ab2ba5abfef..8d77e7a9a34 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy
@@ -19,6 +19,7 @@
package org.elasticsearch.gradle
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
+import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
@@ -33,6 +34,8 @@ import org.gradle.api.artifacts.ProjectDependency
import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
+import org.gradle.api.publish.maven.MavenPublication
+import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
import org.gradle.internal.jvm.Jvm
@@ -54,7 +57,7 @@ class BuildPlugin implements Plugin<Project> {
project.pluginManager.apply('java')
project.pluginManager.apply('carrotsearch.randomized-testing')
// these plugins add lots of info to our jars
- configureJarManifest(project) // jar config must be added before info broker
+ configureJars(project) // jar config must be added before info broker
project.pluginManager.apply('nebula.info-broker')
project.pluginManager.apply('nebula.info-basic')
project.pluginManager.apply('nebula.info-java')
@@ -68,6 +71,7 @@ class BuildPlugin implements Plugin {
configureConfigurations(project)
project.ext.versions = VersionProperties.versions
configureCompile(project)
+ configurePomGeneration(project)
configureTest(project)
configurePrecommit(project)
@@ -109,7 +113,7 @@ class BuildPlugin implements Plugin {
}
// enforce gradle version
- GradleVersion minGradle = GradleVersion.version('2.8')
+ GradleVersion minGradle = GradleVersion.version('2.13')
if (GradleVersion.current() < minGradle) {
throw new GradleException("${minGradle} or above is required to build elasticsearch")
}
@@ -139,7 +143,7 @@ class BuildPlugin implements Plugin {
}
project.rootProject.ext.javaHome = javaHome
- project.rootProject.ext.javaVersion = javaVersion
+ project.rootProject.ext.javaVersion = javaVersionEnum
project.rootProject.ext.buildChecksDone = true
}
project.targetCompatibility = minimumJava
@@ -228,7 +232,7 @@ class BuildPlugin implements Plugin {
*/
static void configureConfigurations(Project project) {
// we are not shipping these jars, we act like dumb consumers of these things
- if (project.path.startsWith(':test:fixtures')) {
+ if (project.path.startsWith(':test:fixtures') || project.path == ':build-tools') {
return
}
// fail on any conflicting dependency versions
@@ -266,44 +270,7 @@ class BuildPlugin implements Plugin {
// add exclusions to the pom directly, for each of the transitive deps of this project's deps
project.modifyPom { MavenPom pom ->
- pom.withXml { XmlProvider xml ->
- // first find if we have dependencies at all, and grab the node
- NodeList depsNodes = xml.asNode().get('dependencies')
- if (depsNodes.isEmpty()) {
- return
- }
-
- // check each dependency for any transitive deps
- for (Node depNode : depsNodes.get(0).children()) {
- String groupId = depNode.get('groupId').get(0).text()
- String artifactId = depNode.get('artifactId').get(0).text()
- String version = depNode.get('version').get(0).text()
-
- // collect the transitive deps now that we know what this dependency is
- String depConfig = transitiveDepConfigName(groupId, artifactId, version)
- Configuration configuration = project.configurations.findByName(depConfig)
- if (configuration == null) {
- continue // we did not make this dep non-transitive
- }
- Set<ResolvedArtifact> artifacts = configuration.resolvedConfiguration.resolvedArtifacts
- if (artifacts.size() <= 1) {
- // this dep has no transitive deps (or the only artifact is itself)
- continue
- }
-
- // we now know we have something to exclude, so add the exclusion elements
- Node exclusions = depNode.appendNode('exclusions')
- for (ResolvedArtifact transitiveArtifact : artifacts) {
- ModuleVersionIdentifier transitiveDep = transitiveArtifact.moduleVersion.id
- if (transitiveDep.group == groupId && transitiveDep.name == artifactId) {
- continue; // don't exclude the dependency itself!
- }
- Node exclusion = exclusions.appendNode('exclusion')
- exclusion.appendNode('groupId', transitiveDep.group)
- exclusion.appendNode('artifactId', transitiveDep.name)
- }
- }
- }
+ pom.withXml(removeTransitiveDependencies(project))
}
}
@@ -332,6 +299,70 @@ class BuildPlugin implements Plugin {
}
}
+ /** Returns a closure which can be used with a MavenPom for removing transitive dependencies. */
+ private static Closure removeTransitiveDependencies(Project project) {
+ // TODO: remove this when enforcing gradle 2.13+, it now properly handles exclusions
+ return { XmlProvider xml ->
+ // first find if we have dependencies at all, and grab the node
+ NodeList depsNodes = xml.asNode().get('dependencies')
+ if (depsNodes.isEmpty()) {
+ return
+ }
+
+ // check each dependency for any transitive deps
+ for (Node depNode : depsNodes.get(0).children()) {
+ String groupId = depNode.get('groupId').get(0).text()
+ String artifactId = depNode.get('artifactId').get(0).text()
+ String version = depNode.get('version').get(0).text()
+
+ // collect the transitive deps now that we know what this dependency is
+ String depConfig = transitiveDepConfigName(groupId, artifactId, version)
+ Configuration configuration = project.configurations.findByName(depConfig)
+ if (configuration == null) {
+ continue // we did not make this dep non-transitive
+ }
+ Set<ResolvedArtifact> artifacts = configuration.resolvedConfiguration.resolvedArtifacts
+ if (artifacts.size() <= 1) {
+ // this dep has no transitive deps (or the only artifact is itself)
+ continue
+ }
+
+ // we now know we have something to exclude, so add the exclusion elements
+ Node exclusions = depNode.appendNode('exclusions')
+ for (ResolvedArtifact transitiveArtifact : artifacts) {
+ ModuleVersionIdentifier transitiveDep = transitiveArtifact.moduleVersion.id
+ if (transitiveDep.group == groupId && transitiveDep.name == artifactId) {
+ continue; // don't exclude the dependency itself!
+ }
+ Node exclusion = exclusions.appendNode('exclusion')
+ exclusion.appendNode('groupId', transitiveDep.group)
+ exclusion.appendNode('artifactId', transitiveDep.name)
+ }
+ }
+ }
+ }
+
+ /** Configures generation of maven poms. */
+ public static void configurePomGeneration(Project project) {
+ project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
+ project.publishing {
+ publications {
+ all { MavenPublication publication -> // we only deal with maven
+ // add exclusions to the pom directly, for each of the transitive deps of this project's deps
+ publication.pom.withXml(removeTransitiveDependencies(project))
+ }
+ }
+ }
+
+ project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom t ->
+ // place the pom next to the jar it is for
+ t.destination = new File(project.buildDir, "distributions/${project.archivesBaseName}-${project.version}.pom")
+ // build poms with assemble
+ project.assemble.dependsOn(t)
+ }
+ }
+ }
+
/** Adds compiler settings to the project */
static void configureCompile(Project project) {
project.ext.compactProfile = 'compact3'
@@ -341,32 +372,40 @@ class BuildPlugin implements Plugin {
options.fork = true
options.forkOptions.executable = new File(project.javaHome, 'bin/javac')
options.forkOptions.memoryMaximumSize = "1g"
+ if (project.targetCompatibility >= JavaVersion.VERSION_1_8) {
+ // compile with compact 3 profile by default
+ // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
+ if (project.compactProfile != 'full') {
+ options.compilerArgs << '-profile' << project.compactProfile
+ }
+ }
/*
* -path because gradle will send in paths that don't always exist.
* -missing because we have tons of missing @returns and @param.
* -serial because we don't use java serialization.
*/
// don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
- options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial' << '-Xdoclint:all' << '-Xdoclint:-missing'
- // compile with compact 3 profile by default
- // NOTE: this is just a compile time check: does not replace testing with a compact3 JRE
- if (project.compactProfile != 'full') {
- options.compilerArgs << '-profile' << project.compactProfile
- }
+ options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'
options.encoding = 'UTF-8'
//options.incremental = true
- // gradle ignores target/source compatibility when it is "unnecessary", but since to compile with
- // java 9, gradle is running in java 8, it incorrectly thinks it is unnecessary
- assert minimumJava == JavaVersion.VERSION_1_8
- options.compilerArgs << '-target' << '1.8' << '-source' << '1.8'
+ if (project.javaVersion == JavaVersion.VERSION_1_9) {
+ // hack until gradle supports java 9's new "-release" arg
+ assert minimumJava == JavaVersion.VERSION_1_8
+ options.compilerArgs << '-release' << '8'
+ project.sourceCompatibility = null
+ project.targetCompatibility = null
+ }
}
}
}
- /** Adds additional manifest info to jars */
- static void configureJarManifest(Project project) {
+ /** Adds additional manifest info to jars, and adds source and javadoc jars */
+ static void configureJars(Project project) {
project.tasks.withType(Jar) { Jar jarTask ->
+ // we put all our distributable files under distributions
+ jarTask.destinationDir = new File(project.buildDir, 'distributions')
+ // fixup the jar manifest
jarTask.doFirst {
boolean isSnapshot = VersionProperties.elasticsearch.endsWith("-SNAPSHOT");
String version = VersionProperties.elasticsearch;
@@ -422,7 +461,7 @@ class BuildPlugin implements Plugin {
// default test sysprop values
systemProperty 'tests.ifNoTests', 'fail'
// TODO: remove setting logging level via system property
- systemProperty 'es.logger.level', 'WARN'
+ systemProperty 'tests.logger.level', 'WARN'
for (Map.Entry property : System.properties.entrySet()) {
if (property.getKey().startsWith('tests.') ||
property.getKey().startsWith('es.')) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy
index 1896cdf1b67..b1b04a2ded6 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/LoggedExec.groovy
@@ -26,14 +26,17 @@ import org.gradle.api.tasks.Exec
* A wrapper around gradle's Exec task to capture output and log on error.
*/
class LoggedExec extends Exec {
+
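+ // buffers combined stdout and stderr so it can be logged if the command fails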
+ protected ByteArrayOutputStream output = new ByteArrayOutputStream()
+
LoggedExec() {
if (logger.isInfoEnabled() == false) {
- standardOutput = new ByteArrayOutputStream()
- errorOutput = standardOutput
+ standardOutput = output
+ errorOutput = output
ignoreExitValue = true
doLast {
if (execResult.exitValue != 0) {
- standardOutput.toString('UTF-8').eachLine { line -> logger.error(line) }
+ output.toString('UTF-8').eachLine { line -> logger.error(line) }
throw new GradleException("Process '${executable} ${args.join(' ')}' finished with non-zero exit value ${execResult.exitValue}")
}
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
new file mode 100644
index 00000000000..3b1ec3c5d87
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.doc
+
+import org.elasticsearch.gradle.test.RestTestPlugin
+import org.gradle.api.Project
+import org.gradle.api.Task
+
+/**
+ * Sets up tests for documentation.
+ */
+public class DocsTestPlugin extends RestTestPlugin {
+
+ @Override
+ public void apply(Project project) {
+ super.apply(project)
+ Task listSnippets = project.tasks.create('listSnippets', SnippetsTask)
+ listSnippets.group 'Docs'
+ listSnippets.description 'List each snippet'
+ listSnippets.perSnippet { println(it.toString()) }
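+ // prints lines like "search.asciidoc[12:15](js)// CONSOLE" (illustrative; the format comes from Snippet.toString)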
+
+ Task listConsoleCandidates = project.tasks.create(
+ 'listConsoleCandidates', SnippetsTask)
+ listConsoleCandidates.group 'Docs'
+ listConsoleCandidates.description 'List snippets that probably should be marked // CONSOLE'
+ listConsoleCandidates.perSnippet {
+ if (
+ it.console // Already marked, nothing to do
+ || it.testResponse // It is a response
+ ) {
+ return
+ }
+ List<String> languages = [
+ // These languages should almost always be marked console
+ 'js', 'json',
+ // These are often curl commands that should be converted but
+ // are probably false positives
+ 'sh', 'shell',
+ ]
+ if (false == languages.contains(it.language)) {
+ return
+ }
+ println(it.toString())
+ }
+
+ project.tasks.create('buildRestTests', RestTestsFromSnippetsTask)
+ }
+}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
new file mode 100644
index 00000000000..c7f4316ee04
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
@@ -0,0 +1,240 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.doc
+
+import org.elasticsearch.gradle.doc.SnippetsTask.Snippet
+import org.gradle.api.InvalidUserDataException
+import org.gradle.api.tasks.Input
+import org.gradle.api.tasks.OutputDirectory
+
+import java.nio.file.Files
+import java.nio.file.Path
+import java.util.regex.Matcher
+
+/**
+ * Generates REST tests for each snippet marked // TEST.
+ */
+public class RestTestsFromSnippetsTask extends SnippetsTask {
+ @Input
+ Map<String, String> setups = new HashMap<>()
+
+ /**
+ * Root directory of the tests being generated. To make the rest test
+ * framework happy we generate them inside outputRoot(), which is
+ * contained in this directory.
+ */
+ @OutputDirectory
+ File testRoot = project.file('build/rest')
+
+ public RestTestsFromSnippetsTask() {
+ project.afterEvaluate {
+ // Wait to set this so testRoot can be customized
+ project.sourceSets.test.output.dir(testRoot, builtBy: this)
+ }
+ TestBuilder builder = new TestBuilder()
+ doFirst { outputRoot().delete() }
+ perSnippet builder.&handleSnippet
+ doLast builder.&finishLastTest
+ }
+
+ /**
+ * Root directory containing all the files generated by this task. It is
+ * contained within testRoot.
+ */
+ File outputRoot() {
+ return new File(testRoot, '/rest-api-spec/test')
+ }
+
+ private class TestBuilder {
+ private static final String SYNTAX = {
+ String method = /(?<method>GET|PUT|POST|HEAD|OPTIONS|DELETE)/
+ String pathAndQuery = /(?<pathAndQuery>[^\n]+)/
+ String badBody = /GET|PUT|POST|HEAD|OPTIONS|DELETE|#/
+ String body = /(?<body>(?:\n(?!$badBody)[^\n]+)+)/
+ String nonComment = /$method\s+$pathAndQuery$body?/
+ String comment = /(?<comment>#.+)/
+ /(?:$comment|$nonComment)\n+/
+ }()
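+ /* Illustrative example of a snippet body the SYNTAX grammar matches
+ * (the request itself is hypothetical):
+ *
+ * GET /twitter/tweet/_search?scroll=1m
+ * { "query": { "match_all": {} } }
+ */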
+
+ /**
+ * The file in which we saw the last snippet that made a test.
+ */
+ Path lastDocsPath
+
+ /**
+ * The file we're building.
+ */
+ PrintWriter current
+
+ /**
+ * Called each time a snippet is encountered. Tracks the snippets and
+ * dispatches to the right handler to actually build the test.
+ */
+ void handleSnippet(Snippet snippet) {
+ if (snippet.language == 'json') {
+ throw new InvalidUserDataException(
+ "$snippet: Use `js` instead of `json`.")
+ }
+ if (snippet.testSetup) {
+ setup(snippet)
+ return
+ }
+ if (snippet.testResponse) {
+ response(snippet)
+ return
+ }
+ if (snippet.test || snippet.console) {
+ test(snippet)
+ return
+ }
+ // Must be an unmarked snippet....
+ }
+
+ private void test(Snippet test) {
+ setupCurrent(test)
+
+ if (false == test.continued) {
+ current.println('---')
+ current.println("\"$test.start\":")
+ }
+ if (test.skipTest) {
+ current.println(" - skip:")
+ current.println(" features: always_skip")
+ current.println(" reason: $test.skipTest")
+ }
+ if (test.setup != null) {
+ String setup = setups[test.setup]
+ if (setup == null) {
+ throw new InvalidUserDataException("Couldn't find setup "
+ + "for $test")
+ }
+ current.println(setup)
+ }
+
+ body(test, false)
+ }
+
+ private void response(Snippet response) {
+ current.println(" - match: ")
+ current.println(" \$body: ")
+ response.contents.eachLine { current.println(" $it") }
+ }
+
+ void emitDo(String method, String pathAndQuery,
+ String body, String catchPart, boolean inSetup) {
+ def (String path, String query) = pathAndQuery.tokenize('?')
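+ // e.g. "twitter/_search?size=1&pretty" -> path "twitter/_search", query "size=1&pretty"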
+ current.println(" - do:")
+ if (catchPart != null) {
+ current.println(" catch: $catchPart")
+ }
+ current.println(" raw:")
+ current.println(" method: $method")
+ current.println(" path: \"$path\"")
+ if (query != null) {
+ for (String param: query.tokenize('&')) {
+ def (String name, String value) = param.tokenize('=')
+ if (value == null) {
+ value = ''
+ }
+ current.println(" $name: \"$value\"")
+ }
+ }
+ if (body != null) {
+ // Throw out the leading newline we get from parsing the body
+ body = body.substring(1)
+ current.println(" body: |")
+ body.eachLine { current.println(" $it") }
+ }
+ /* Catch any shard failures. These only cause a non-200 response if
+ * no shard succeeds. But we need to fail the tests on all of these
+ * because they mean invalid syntax or broken queries or something
+ * else that we don't want to teach people to do. The REST test
+ * framework doesn't allow us to have assertions in the setup
+ * section so we have to skip it there. We also have to skip _cat
+ * actions because they don't return json so we can't use is_false
+ * on them. That is ok because they don't have this
+ * partial-success-is-success thing.
+ */
+ if (false == inSetup && false == path.startsWith('_cat')) {
+ current.println(" - is_false: _shards.failures")
+ }
+ }
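+ /* For example (illustrative): emitDo('GET', 'twitter/_search?size=1',
+ * null, null, false) prints a raw do with method GET, path
+ * "twitter/_search" and a size param of "1", followed by the
+ * _shards.failures check. */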
+
+ private void setup(Snippet setup) {
+ if (lastDocsPath == setup.path) {
+ throw new InvalidUserDataException("$setup: wasn't first")
+ }
+ setupCurrent(setup)
+ current.println('---')
+ current.println("setup:")
+ body(setup, true)
+ // always wait for yellow before anything is executed
+ current.println(
+ " - do:\n" +
+ " raw:\n" +
+ " method: GET\n" +
+ " path: \"_cluster/health\"\n" +
+ " wait_for_status: \"yellow\"")
+ }
+
+ private void body(Snippet snippet, boolean inSetup) {
+ parse("$snippet", snippet.contents, SYNTAX) { matcher, last ->
+ if (matcher.group("comment") != null) {
+ // Comment
+ return
+ }
+ String method = matcher.group("method")
+ String pathAndQuery = matcher.group("pathAndQuery")
+ String body = matcher.group("body")
+ String catchPart = last ? snippet.catchPart : null
+ if (pathAndQuery.startsWith('/')) {
+ // Leading '/'s break the generated paths
+ pathAndQuery = pathAndQuery.substring(1)
+ }
+ emitDo(method, pathAndQuery, body, catchPart, inSetup)
+ }
+ }
+
+ private PrintWriter setupCurrent(Snippet test) {
+ if (lastDocsPath == test.path) {
+ return
+ }
+ finishLastTest()
+ lastDocsPath = test.path
+
+ // Make the destination file:
+ // Shift the path into the destination directory tree
+ Path dest = outputRoot().toPath().resolve(test.path)
+ // Replace the extension
+ String fileName = dest.getName(dest.nameCount - 1)
+ dest = dest.parent.resolve(fileName.replace('.asciidoc', '.yaml'))
+
+ // Now setup the writer
+ Files.createDirectories(dest.parent)
+ current = dest.newPrintWriter('UTF-8')
+ }
+
+ void finishLastTest() {
+ if (current != null) {
+ current.close()
+ current = null
+ }
+ }
+ }
+}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
new file mode 100644
index 00000000000..afd91858e9d
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
@@ -0,0 +1,308 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle.doc
+
+import org.gradle.api.DefaultTask
+import org.gradle.api.InvalidUserDataException
+import org.gradle.api.file.ConfigurableFileTree
+import org.gradle.api.tasks.InputFiles
+import org.gradle.api.tasks.TaskAction
+
+import java.nio.file.Path
+import java.util.regex.Matcher
+
+/**
+ * A task which will run a closure on each snippet in the documentation.
+ */
+public class SnippetsTask extends DefaultTask {
+ private static final String SCHAR = /(?:\\\/|[^\/])/
+ private static final String SUBSTITUTION = /s\/($SCHAR+)\/($SCHAR*)\//
+ private static final String CATCH = /catch:\s*((?:\/[^\/]+\/)|[^ \]]+)/
+ private static final String SKIP = /skip:([^\]]+)/
+ private static final String SETUP = /setup:([^ \]]+)/
+ private static final String TEST_SYNTAX =
+ /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP) ?/
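+ // Matches marker payloads like (illustrative):
+ // catch:request s/\d+/42/ continued setup:twitter
+ // skip:this test is broken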
+
+ /**
+ * Action to take on each snippet. Called with a single parameter, an
+ * instance of Snippet.
+ */
+ Closure perSnippet
+
+ /**
+ * The docs to scan. Defaults to every file in the directory except the
+ * build.gradle file because that is appropriate for Elasticsearch's docs
+ * directory.
+ */
+ @InputFiles
+ ConfigurableFileTree docs = project.fileTree(project.projectDir) {
+ // No snippets in the build file
+ exclude 'build.gradle'
+ // That is where the snippets go, not where they come from!
+ exclude 'build'
+ }
+
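+ /* A snippet in the docs looks roughly like this (illustrative):
+ *
+ * [source,js]
+ * ----
+ * GET /_search
+ * ----
+ * // CONSOLE
+ */
+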
+ @TaskAction
+ public void executeTask() {
+ /*
+ * Walks each line of each file, building snippets as it encounters
+ * the lines that make up the snippet.
+ */
+ for (File file: docs) {
+ String lastLanguage
+ int lastLanguageLine
+ Snippet snippet = null
+ StringBuilder contents = null
+ List substitutions = null
+ Closure emit = {
+ snippet.contents = contents.toString()
+ contents = null
+ if (substitutions != null) {
+ substitutions.each { String pattern, String subst ->
+ /*
+ * $body is really common but it looks like a
+ * backreference so we just escape it here to make the
+ * tests cleaner.
+ */
+ subst = subst.replace('$body', '\\$body')
+ // turn escaped \n into a real newline
+ subst = subst.replace('\\n', '\n')
+ snippet.contents = snippet.contents.replaceAll(
+ pattern, subst)
+ }
+ substitutions = null
+ }
+ perSnippet(snippet)
+ snippet = null
+ }
+ file.eachLine('UTF-8') { String line, int lineNumber ->
+ Matcher matcher
+ if (line ==~ /-{4,}\s*/) { // Four or more dashes delimit a snippet
+ if (snippet == null) {
+ Path path = docs.dir.toPath().relativize(file.toPath())
+ snippet = new Snippet(path: path, start: lineNumber)
+ if (lastLanguageLine == lineNumber - 1) {
+ snippet.language = lastLanguage
+ }
+ } else {
+ snippet.end = lineNumber
+ }
+ return
+ }
+ matcher = line =~ /\[source,(\w+)]\s*/
+ if (matcher.matches()) {
+ lastLanguage = matcher.group(1)
+ lastLanguageLine = lineNumber
+ return
+ }
+ if (line ==~ /\/\/\s*AUTOSENSE\s*/) {
+ throw new InvalidUserDataException("AUTOSENSE has been " +
+ "replaced by CONSOLE. Use that instead at " +
+ "$file:$lineNumber")
+ }
+ if (line ==~ /\/\/\s*CONSOLE\s*/) {
+ if (snippet == null) {
+ throw new InvalidUserDataException("CONSOLE not " +
+ "paired with a snippet at $file:$lineNumber")
+ }
+ snippet.console = true
+ return
+ }
+ matcher = line =~ /\/\/\s*TEST(\[(.+)\])?\s*/
+ if (matcher.matches()) {
+ if (snippet == null) {
+ throw new InvalidUserDataException("TEST not " +
+ "paired with a snippet at $file:$lineNumber")
+ }
+ snippet.test = true
+ if (matcher.group(2) != null) {
+ String loc = "$file:$lineNumber"
+ parse(loc, matcher.group(2), TEST_SYNTAX) {
+ if (it.group(1) != null) {
+ snippet.catchPart = it.group(1)
+ return
+ }
+ if (it.group(2) != null) {
+ if (substitutions == null) {
+ substitutions = []
+ }
+ substitutions.add([it.group(2), it.group(3)])
+ return
+ }
+ if (it.group(4) != null) {
+ snippet.skipTest = it.group(4)
+ return
+ }
+ if (it.group(5) != null) {
+ snippet.continued = true
+ return
+ }
+ if (it.group(6) != null) {
+ snippet.setup = it.group(6)
+ return
+ }
+ throw new InvalidUserDataException(
+ "Invalid test marker: $line")
+ }
+ }
+ return
+ }
+ matcher = line =~ /\/\/\s*TESTRESPONSE(\[(.+)\])?\s*/
+ if (matcher.matches()) {
+ if (snippet == null) {
+ throw new InvalidUserDataException("TESTRESPONSE not " +
+ "paired with a snippet at $file:$lineNumber")
+ }
+ snippet.testResponse = true
+ if (matcher.group(2) != null) {
+ if (substitutions == null) {
+ substitutions = []
+ }
+ String loc = "$file:$lineNumber"
+ parse(loc, matcher.group(2), /$SUBSTITUTION ?/) {
+ substitutions.add([it.group(1), it.group(2)])
+ }
+ }
+ return
+ }
+ if (line ==~ /\/\/\s*TESTSETUP\s*/) {
+ snippet.testSetup = true
+ return
+ }
+ if (snippet == null) {
+ // Outside
+ return
+ }
+ if (snippet.end == Snippet.NOT_FINISHED) {
+ // Inside
+ if (contents == null) {
+ contents = new StringBuilder()
+ }
+ // We don't need the annotations
+ line = line.replaceAll(/<\d+>/, '')
+ // Nor any trailing spaces
+ line = line.replaceAll(/\s+$/, '')
+ contents.append(line).append('\n')
+ return
+ }
+ // Just finished
+ emit()
+ }
+ if (snippet != null) emit()
+ }
+ }
+
+ static class Snippet {
+ static final int NOT_FINISHED = -1
+
+ /**
+ * Path to the file containing this snippet. Relative to docs.dir of the
+ * SnippetsTask that created it.
+ */
+ Path path
+ int start
+ int end = NOT_FINISHED
+ String contents
+
+ boolean console = false
+ boolean test = false
+ boolean testResponse = false
+ boolean testSetup = false
+ String skipTest = null
+ boolean continued = false
+ String language = null
+ String catchPart = null
+ String setup = null
+
+ @Override
+ public String toString() {
+ String result = "$path[$start:$end]"
+ if (language != null) {
+ result += "($language)"
+ }
+ if (console) {
+ result += '// CONSOLE'
+ }
+ if (test) {
+ result += '// TEST'
+ if (catchPart) {
+ result += "[catch: $catchPart]"
+ }
+ if (skipTest) {
+ result += "[skip=$skipTest]"
+ }
+ if (continued) {
+ result += '[continued]'
+ }
+ if (setup) {
+ result += "[setup:$setup]"
+ }
+ }
+ if (testResponse) {
+ result += '// TESTRESPONSE'
+ }
+ if (testSetup) {
+ result += '// TESTSETUP'
+ }
+ return result
+ }
+ }
+
+ /**
+ * Repeatedly match the pattern to the string, calling the closure with the
+ * matcher each time there is a match. If there are characters that don't
+ * match then blow up. If the closure takes two parameters then the second
+ * one is "is this the last match?".
+ */
+ protected parse(String location, String s, String pattern, Closure c) {
+ if (s == null) {
+ return // Silly null, only real stuff gets to match!
+ }
+ Matcher m = s =~ pattern
+ int offset = 0
+ Closure extraContent = { message ->
+ StringBuilder cutOut = new StringBuilder()
+ cutOut.append(s[offset - 6..offset - 1])
+ cutOut.append('*')
+ cutOut.append(s[offset..Math.min(offset + 5, s.length() - 1)])
+ String cutOutNoNl = cutOut.toString().replace('\n', '\\n')
+ throw new InvalidUserDataException("$location: Extra content "
+ + "$message ('$cutOutNoNl') matching [$pattern]: $s")
+ }
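+ // e.g. with pattern /a+ ?/, input "aa aaa" fires the closure twice;
+ // input "aa b" trips extraContent because "b" never matches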
+ while (m.find()) {
+ if (m.start() != offset) {
+ extraContent("between [$offset] and [${m.start()}]")
+ }
+ offset = m.end()
+ if (c.maximumNumberOfParameters == 1) {
+ c(m)
+ } else {
+ c(m, offset == s.length())
+ }
+ }
+ if (offset == 0) {
+ throw new InvalidUserDataException("$location: Didn't match "
+ + "$pattern: $s")
+ }
+ if (offset != s.length()) {
+ extraContent("after [$offset]")
+ }
+ }
+}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
index b04f959e068..ba013da31e9 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy
@@ -18,14 +18,14 @@
*/
package org.elasticsearch.gradle.plugin
+import nebula.plugin.publishing.maven.MavenBasePublishPlugin
+import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
-import org.gradle.api.artifacts.Dependency
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip
-
/**
* Encapsulates build configuration for an Elasticsearch plugin.
*/
@@ -50,10 +50,11 @@ public class PluginBuildPlugin extends BuildPlugin {
} else {
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
+ addPomGeneration(project)
}
project.namingConventions {
- // Plugins decalare extensions of ESIntegTestCase as "Tests" instead of IT.
+ // Plugins declare integration tests as "Tests" instead of IT.
skipIntegTestInDisguise = true
}
}
@@ -125,4 +126,32 @@ public class PluginBuildPlugin extends BuildPlugin {
project.configurations.getByName('default').extendsFrom = []
project.artifacts.add('default', bundle)
}
+
+ /**
+ * Adds the plugin jar and zip as publications.
+ */
+ protected static void addPomGeneration(Project project) {
+ project.plugins.apply(MavenBasePublishPlugin.class)
+ project.plugins.apply(MavenScmPlugin.class)
+
+ project.publishing {
+ publications {
+ nebula {
+ artifact project.bundlePlugin
+ pom.withXml {
+ // overwrite the name/description in the pom that nebula sets up
+ Node root = asNode()
+ for (Node node : root.children()) {
+ if (node.name() == 'name') {
+ node.setValue(project.pluginProperties.extension.name)
+ } else if (node.name() == 'description') {
+ node.setValue(project.pluginProperties.extension.description)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ }
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy
index 612bc568621..52de7dac2d5 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy
@@ -21,11 +21,11 @@ package org.elasticsearch.gradle.precommit
import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
+import org.gradle.api.artifacts.Dependency
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile
-
/**
* Runs NamingConventionsCheck on a classpath/directory combo to verify that
* tests are named according to our conventions so they'll be picked up by
@@ -57,8 +57,27 @@ public class NamingConventionsTask extends LoggedExec {
@Input
boolean skipIntegTestInDisguise = false
+ /**
+ * Superclass for all tests.
+ */
+ @Input
+ String testClass = 'org.apache.lucene.util.LuceneTestCase'
+
+ /**
+ * Superclass for all integration tests.
+ */
+ @Input
+ String integTestClass = 'org.elasticsearch.test.ESIntegTestCase'
+
public NamingConventionsTask() {
- dependsOn(classpath)
+ // Extra classpath contains the NamingConventionsCheck tool itself
+ project.configurations.create('namingConventions')
+ Dependency buildToolsDep = project.dependencies.add('namingConventions',
+ "org.elasticsearch.gradle:build-tools:${VersionProperties.elasticsearch}")
+ buildToolsDep.transitive = false // We don't need gradle in the classpath. It conflicts.
+ FileCollection extraClasspath = project.configurations.namingConventions
+ dependsOn(extraClasspath)
+
description = "Runs NamingConventionsCheck on ${classpath}"
executable = new File(project.javaHome, 'bin/java')
onlyIf { project.sourceSets.test.output.classesDir.exists() }
@@ -69,9 +88,12 @@ public class NamingConventionsTask extends LoggedExec {
project.afterEvaluate {
doFirst {
args('-Djna.nosys=true')
- args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
+ args('-cp', (classpath + extraClasspath).asPath, 'org.elasticsearch.test.NamingConventionsCheck')
+ args('--test-class', testClass)
if (skipIntegTestInDisguise) {
args('--skip-integ-tests-in-disguise')
+ } else {
+ args('--integ-test-class', integTestClass)
}
/*
* The test framework has classes that fail the checks to validate that the checks fail properly.
@@ -79,7 +101,7 @@ public class NamingConventionsTask extends LoggedExec {
* process of ignoring them lets us validate that they were found so this ignore parameter acts
* as the test for the NamingConventionsCheck.
*/
- if (':test:framework'.equals(project.path)) {
+ if (':build-tools'.equals(project.path)) {
args('--self-test')
}
args('--', project.sourceSets.test.output.classesDir.absolutePath)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index 427d3191dc5..a5e1e4c8932 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -34,7 +34,6 @@ class PrecommitTasks {
configureForbiddenApis(project),
configureCheckstyle(project),
configureNamingConventions(project),
- configureLoggerUsage(project),
project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
project.tasks.create('jarHell', JarHellTask.class),
@@ -49,6 +48,20 @@ class PrecommitTasks {
UpdateShasTask updateShas = project.tasks.create('updateShas', UpdateShasTask.class)
updateShas.parentTask = dependencyLicenses
}
+ if (project.path != ':build-tools') {
+ /*
+ * Sadly, build-tools can't have logger-usage-check because that
+ * would create a circular project dependency between build-tools
+ * (which provides NamingConventionsCheck) and :test:logger-usage
+ * which provides the logger usage check. Since build-tools
+ * doesn't use the logger usage check (it has none of
+ * Elasticsearch's loggers) and :test:logger-usage does use the
+ * NamingConventionsCheck, we break the circular dependency here.
+ */
+ precommitTasks.add(configureLoggerUsage(project))
+ }
+
Map precommitOptions = [
name: 'precommit',
@@ -62,9 +75,8 @@ class PrecommitTasks {
private static Task configureForbiddenApis(Project project) {
project.pluginManager.apply(ForbiddenApisPlugin.class)
project.forbiddenApis {
- internalRuntimeForbidden = true
failOnUnsupportedJava = false
- bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out']
+ bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-non-portable', 'jdk-system-out']
signaturesURLs = [getClass().getResource('/forbidden/jdk-signatures.txt'),
getClass().getResource('/forbidden/es-all-signatures.txt')]
suppressAnnotations = ['**.SuppressForbidden']
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
index 3ff5a06ad42..076a564f84a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
@@ -203,8 +203,7 @@ public class ThirdPartyAuditTask extends AntTask {
Set sheistySet = getSheistyClasses(tmpDir.toPath());
try {
- ant.thirdPartyAudit(internalRuntimeForbidden: false,
- failOnUnsupportedJava: false,
+ ant.thirdPartyAudit(failOnUnsupportedJava: false,
failOnMissingClasses: false,
signaturesFile: new File(getClass().getResource('/forbidden/third-party-audit.txt').toURI()),
classpath: classpath.asPath) {
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 34dde6e5dad..c3004a64b86 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -291,9 +291,10 @@ class ClusterFormationTasks {
File configDir = new File(node.homeDir, 'config')
copyConfig.into(configDir) // copy must always have a general dest dir, even though we don't use it
for (Map.Entry extraConfigFile : node.config.extraConfigFiles.entrySet()) {
+ Object extraConfigFileValue = extraConfigFile.getValue()
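+ // capture the value now; closures created below share the loop variable
+ // and would otherwise all see the last entry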
copyConfig.doFirst {
// make sure the copy won't be a no-op or act on a directory
- File srcConfigFile = project.file(extraConfigFile.getValue())
+ File srcConfigFile = project.file(extraConfigFileValue)
if (srcConfigFile.isDirectory()) {
throw new GradleException("Source for extraConfigFile must be a file: ${srcConfigFile}")
}
@@ -303,7 +304,7 @@ class ClusterFormationTasks {
}
File destConfigFile = new File(node.homeDir, 'config/' + extraConfigFile.getKey())
// wrap source file in closure to delay resolution to execution time
- copyConfig.from({ extraConfigFile.getValue() }) {
+ copyConfig.from({ extraConfigFileValue }) {
// this must be in a closure so it is only applied to the single file specified in from above
into(configDir.toPath().relativize(destConfigFile.canonicalFile.parentFile.toPath()).toFile())
rename { destConfigFile.name }
@@ -418,8 +419,7 @@ class ClusterFormationTasks {
// argument are wrapped in an ExecArgWrapper that escapes commas
args execArgs.collect { a -> new EscapeCommaWrapper(arg: a) }
} else {
- executable 'sh'
- args execArgs
+ commandLine execArgs
}
}
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index 2ff5e333139..5d9961a0425 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -129,18 +129,18 @@ class NodeInfo {
}
env = [ 'JAVA_HOME' : project.javaHome ]
- args.addAll("-E", "es.node.portsfile=true")
+ args.addAll("-E", "node.portsfile=true")
String collectedSystemProperties = config.systemProperties.collect { key, value -> "-D${key}=${value}" }.join(" ")
String esJavaOpts = config.jvmArgs.isEmpty() ? collectedSystemProperties : collectedSystemProperties + " " + config.jvmArgs
env.put('ES_JAVA_OPTS', esJavaOpts)
for (Map.Entry property : System.properties.entrySet()) {
- if (property.getKey().startsWith('es.')) {
+ if (property.key.startsWith('tests.es.')) {
args.add("-E")
- args.add("${property.getKey()}=${property.getValue()}")
+ args.add("${property.key.substring('tests.es.'.size())}=${property.value}")
}
}
env.put('ES_JVM_OPTIONS', new File(confDir, 'jvm.options'))
- args.addAll("-E", "es.path.conf=${confDir}")
+ args.addAll("-E", "path.conf=${confDir}")
if (Os.isFamily(Os.FAMILY_WINDOWS)) {
args.add('"') // end the entire command, quoted
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index 3bfe9d61018..fedcf6e87d3 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -62,6 +62,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
project.gradle.projectsEvaluated {
NodeInfo node = ClusterFormationTasks.setup(project, this, clusterConfig)
systemProperty('tests.rest.cluster', "${-> node.httpUri()}")
+ systemProperty('tests.config.dir', "${-> node.confDir}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
index 2f2030f6cd2..c68e0528c9b 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/BatsOverVagrantTask.groovy
@@ -19,6 +19,7 @@
package org.elasticsearch.gradle.vagrant
import org.gradle.api.DefaultTask
+import org.gradle.api.tasks.Input
import org.gradle.api.tasks.TaskAction
import org.gradle.logging.ProgressLoggerFactory
import org.gradle.process.internal.ExecAction
@@ -30,41 +31,22 @@ import javax.inject.Inject
* Runs bats over vagrant. Pretty much like running it using Exec but with a
* nicer output formatter.
*/
-class BatsOverVagrantTask extends DefaultTask {
- String command
- String boxName
- ExecAction execAction
+public class BatsOverVagrantTask extends VagrantCommandTask {
- BatsOverVagrantTask() {
- execAction = getExecActionFactory().newExecAction()
- }
+ @Input
+ String command
- @Inject
- ProgressLoggerFactory getProgressLoggerFactory() {
- throw new UnsupportedOperationException();
- }
+ BatsOverVagrantTask() {
+ project.afterEvaluate {
+ args 'ssh', boxName, '--command', command
+ }
+ }
- @Inject
- ExecActionFactory getExecActionFactory() {
- throw new UnsupportedOperationException();
- }
-
- void boxName(String boxName) {
- this.boxName = boxName
- }
-
- void command(String command) {
- this.command = command
- }
-
- @TaskAction
- void exec() {
- // It'd be nice if --machine-readable were, well, nice
- execAction.commandLine(['vagrant', 'ssh', boxName, '--command', command])
- execAction.setStandardOutput(new TapLoggerOutputStream(
- command: command,
- factory: getProgressLoggerFactory(),
- logger: logger))
- execAction.execute();
- }
+ @Override
+ protected OutputStream createLoggerOutputStream() {
+ return new TapLoggerOutputStream(
+ command: commandLine.join(' '),
+ factory: getProgressLoggerFactory(),
+ logger: logger)
+ }
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
index 5f4a5e0a0c4..3f980c57a49 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/TapLoggerOutputStream.groovy
@@ -19,9 +19,11 @@
package org.elasticsearch.gradle.vagrant
import com.carrotsearch.gradle.junit4.LoggingOutputStream
+import groovy.transform.PackageScope
import org.gradle.api.GradleScriptException
import org.gradle.api.logging.Logger
import org.gradle.logging.ProgressLogger
+import org.gradle.logging.ProgressLoggerFactory
import java.util.regex.Matcher
@@ -35,73 +37,77 @@ import java.util.regex.Matcher
* There is a Tap4j project but we can't use it because it wants to parse the
* entire TAP stream at once and won't parse it stream-wise.
*/
-class TapLoggerOutputStream extends LoggingOutputStream {
- ProgressLogger progressLogger
- Logger logger
- int testsCompleted = 0
- int testsFailed = 0
- int testsSkipped = 0
- Integer testCount
- String countsFormat
+public class TapLoggerOutputStream extends LoggingOutputStream {
+ private final ProgressLogger progressLogger
+ private boolean isStarted = false
+ private final Logger logger
+ private int testsCompleted = 0
+ private int testsFailed = 0
+ private int testsSkipped = 0
+ private Integer testCount
+ private String countsFormat
- TapLoggerOutputStream(Map args) {
- logger = args.logger
- progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
- progressLogger.setDescription("TAP output for $args.command")
- progressLogger.started()
- progressLogger.progress("Starting $args.command...")
- }
-
- void flush() {
- if (end == start) return
- line(new String(buffer, start, end - start))
- start = end
- }
-
- void line(String line) {
- // System.out.print "===> $line\n"
- if (testCount == null) {
- try {
- testCount = line.split('\\.').last().toInteger()
- def length = (testCount as String).length()
- countsFormat = "%0${length}d"
- countsFormat = "[$countsFormat|$countsFormat|$countsFormat/$countsFormat]"
- return
- } catch (Exception e) {
- throw new GradleScriptException(
- 'Error parsing first line of TAP stream!!', e)
- }
- }
- Matcher m = line =~ /(?<status>ok|not ok) \d+(?<skip> # skip (?<skipReason>\(.+\))?)? \[(?<suite>.+)\] (?<test>.+)/
- if (!m.matches()) {
- /* These might be failure report lines or comments or whatever. Its hard
- to tell and it doesn't matter. */
- logger.warn(line)
- return
- }
- boolean skipped = m.group('skip') != null
- boolean success = !skipped && m.group('status') == 'ok'
- String skipReason = m.group('skipReason')
- String suiteName = m.group('suite')
- String testName = m.group('test')
-
- String status
- if (skipped) {
- status = "SKIPPED"
- testsSkipped++
- } else if (success) {
- status = " OK"
- testsCompleted++
- } else {
- status = " FAILED"
- testsFailed++
+ TapLoggerOutputStream(Map args) {
+ logger = args.logger
+ progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
+ progressLogger.setDescription("TAP output for `${args.command}`")
}
- String counts = sprintf(countsFormat,
- [testsCompleted, testsFailed, testsSkipped, testCount])
- progressLogger.progress("Tests $counts, $status [$suiteName] $testName")
- if (!success) {
- logger.warn(line)
+ @Override
+ public void flush() {
+ if (isStarted == false) {
+ progressLogger.started()
+ isStarted = true
+ }
+ if (end == start) return
+ line(new String(buffer, start, end - start))
+ start = end
+ }
+
+ void line(String line) {
+ // System.out.print "===> $line\n"
+ if (testCount == null) {
+ try {
+ testCount = line.split('\\.').last().toInteger()
+ def length = (testCount as String).length()
+ countsFormat = "%0${length}d"
+ countsFormat = "[$countsFormat|$countsFormat|$countsFormat/$countsFormat]"
+ return
+ } catch (Exception e) {
+ throw new GradleScriptException(
+ 'Error parsing first line of TAP stream!!', e)
+ }
+ }
+ Matcher m = line =~ /(?<status>ok|not ok) \d+(?<skip> # skip (?<skipReason>\(.+\))?)? \[(?<suite>.+)\] (?<test>.+)/
+ if (!m.matches()) {
+ /* These might be failure report lines or comments or whatever. It's
+ hard to tell and it doesn't matter. */
+ logger.warn(line)
+ return
+ }
+ boolean skipped = m.group('skip') != null
+ boolean success = !skipped && m.group('status') == 'ok'
+ String skipReason = m.group('skipReason')
+ String suiteName = m.group('suite')
+ String testName = m.group('test')
+
+ String status
+ if (skipped) {
+ status = "SKIPPED"
+ testsSkipped++
+ } else if (success) {
+ status = " OK"
+ testsCompleted++
+ } else {
+ status = " FAILED"
+ testsFailed++
+ }
+
+ String counts = sprintf(countsFormat,
+ [testsCompleted, testsFailed, testsSkipped, testCount])
+ progressLogger.progress("Tests $counts, $status [$suiteName] $testName")
+ if (!success) {
+ logger.warn(line)
+ }
}
- }
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
index 92b4a575eba..d79c2533fab 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantCommandTask.groovy
@@ -18,11 +18,10 @@
*/
package org.elasticsearch.gradle.vagrant
-import org.gradle.api.DefaultTask
-import org.gradle.api.tasks.TaskAction
+import org.apache.commons.io.output.TeeOutputStream
+import org.elasticsearch.gradle.LoggedExec
+import org.gradle.api.tasks.Input
import org.gradle.logging.ProgressLoggerFactory
-import org.gradle.process.internal.ExecAction
-import org.gradle.process.internal.ExecActionFactory
import javax.inject.Inject
@@ -30,43 +29,30 @@ import javax.inject.Inject
* Runs a vagrant command. Pretty much like Exec task but with a nicer output
* formatter and defaults to `vagrant` as first part of commandLine.
*/
-class VagrantCommandTask extends DefaultTask {
- List commandLine
- String boxName
- ExecAction execAction
+public class VagrantCommandTask extends LoggedExec {
- VagrantCommandTask() {
- execAction = getExecActionFactory().newExecAction()
- }
+ @Input
+ String boxName
- @Inject
- ProgressLoggerFactory getProgressLoggerFactory() {
- throw new UnsupportedOperationException();
- }
+ public VagrantCommandTask() {
+ executable = 'vagrant'
+ project.afterEvaluate {
+ // It'd be nice if --machine-readable were, well, nice
+ standardOutput = new TeeOutputStream(standardOutput, createLoggerOutputStream())
+ }
+ }
- @Inject
- ExecActionFactory getExecActionFactory() {
- throw new UnsupportedOperationException();
- }
+ protected OutputStream createLoggerOutputStream() {
+ return new VagrantLoggerOutputStream(
+ command: commandLine.join(' '),
+ factory: getProgressLoggerFactory(),
+ /* Vagrant tends to output a lot of stuff, but most of the important
+ stuff starts with ==> $box */
+ squashedPrefix: "==> $boxName: ")
+ }
- void boxName(String boxName) {
- this.boxName = boxName
- }
-
- void commandLine(Object... commandLine) {
- this.commandLine = commandLine
- }
-
- @TaskAction
- void exec() {
- // It'd be nice if --machine-readable were, well, nice
- execAction.commandLine(['vagrant'] + commandLine)
- execAction.setStandardOutput(new VagrantLoggerOutputStream(
- command: commandLine.join(' '),
- factory: getProgressLoggerFactory(),
- /* Vagrant tends to output a lot of stuff, but most of the important
- stuff starts with ==> $box */
- squashedPrefix: "==> $boxName: "))
- execAction.execute();
- }
+ @Inject
+ ProgressLoggerFactory getProgressLoggerFactory() {
+ throw new UnsupportedOperationException();
+ }
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
index 488c4511b1f..331a638b5ca 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantLoggerOutputStream.groovy
@@ -19,7 +19,9 @@
package org.elasticsearch.gradle.vagrant
import com.carrotsearch.gradle.junit4.LoggingOutputStream
+import org.gradle.api.logging.Logger
import org.gradle.logging.ProgressLogger
+import org.gradle.logging.ProgressLoggerFactory
/**
* Adapts an OutputStream being written to by vagrant into a ProgressLogger. It
@@ -42,79 +44,60 @@ import org.gradle.logging.ProgressLogger
* to catch so it can render the output like
* "Heading text > stdout from the provisioner".
*/
-class VagrantLoggerOutputStream extends LoggingOutputStream {
- static final String HEADING_PREFIX = '==> '
+public class VagrantLoggerOutputStream extends LoggingOutputStream {
+ private static final String HEADING_PREFIX = '==> '
- ProgressLogger progressLogger
- String squashedPrefix
- String lastLine = ''
- boolean inProgressReport = false
- String heading = ''
+ private final ProgressLogger progressLogger
+ private boolean isStarted = false
+ private String squashedPrefix
+ private String lastLine = ''
+ private boolean inProgressReport = false
+ private String heading = ''
- VagrantLoggerOutputStream(Map args) {
- progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
- progressLogger.setDescription("Vagrant $args.command")
- progressLogger.started()
- progressLogger.progress("Starting vagrant $args.command...")
- squashedPrefix = args.squashedPrefix
- }
-
- void flush() {
- if (end == start) return
- line(new String(buffer, start, end - start))
- start = end
- }
-
- void line(String line) {
- // debugPrintLine(line) // Uncomment me to log every incoming line
- if (line.startsWith('\r\u001b')) {
- /* We don't want to try to be a full terminal emulator but we want to
- keep the escape sequences from leaking and catch _some_ of the
- meaning. */
- line = line.substring(2)
- if ('[K' == line) {
- inProgressReport = true
- }
- return
+ VagrantLoggerOutputStream(Map args) {
+ progressLogger = args.factory.newOperation(VagrantLoggerOutputStream)
+ progressLogger.setDescription("Vagrant output for `$args.command`")
+ squashedPrefix = args.squashedPrefix
}
- if (line.startsWith(squashedPrefix)) {
- line = line.substring(squashedPrefix.length())
- inProgressReport = false
- lastLine = line
- if (line.startsWith(HEADING_PREFIX)) {
- line = line.substring(HEADING_PREFIX.length())
- heading = line + ' > '
- } else {
- line = heading + line
- }
- } else if (inProgressReport) {
- inProgressReport = false
- line = lastLine + line
- } else {
- return
- }
- // debugLogLine(line) // Uncomment me to log every line we add to the logger
- progressLogger.progress(line)
- }
- void debugPrintLine(line) {
- System.out.print '----------> '
- for (int i = start; i < end; i++) {
- switch (buffer[i] as char) {
- case ' '..'~':
- System.out.print buffer[i] as char
- break
- default:
- System.out.print '%'
- System.out.print Integer.toHexString(buffer[i])
- }
+ @Override
+ public void flush() {
+ if (isStarted == false) {
+ progressLogger.started()
+ isStarted = true
+ }
+ if (end == start) return
+ line(new String(buffer, start, end - start))
+ start = end
}
- System.out.print '\n'
- }
- void debugLogLine(line) {
- System.out.print '>>>>>>>>>>> '
- System.out.print line
- System.out.print '\n'
- }
+ void line(String line) {
+ if (line.startsWith('\r\u001b')) {
+ /* We don't want to try to be a full terminal emulator but we want to
+ keep the escape sequences from leaking and catch _some_ of the
+ meaning. */
+ line = line.substring(2)
+ if ('[K' == line) {
+ inProgressReport = true
+ }
+ return
+ }
+ if (line.startsWith(squashedPrefix)) {
+ line = line.substring(squashedPrefix.length())
+ inProgressReport = false
+ lastLine = line
+ if (line.startsWith(HEADING_PREFIX)) {
+ line = line.substring(HEADING_PREFIX.length())
+ heading = line + ' > '
+ } else {
+ line = heading + line
+ }
+ } else if (inProgressReport) {
+ inProgressReport = false
+ line = lastLine + line
+ } else {
+ return
+ }
+ progressLogger.progress(line)
+ }
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java
similarity index 73%
rename from test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java
rename to buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java
index 13163cee029..cbfa31d1aaf 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java
+++ b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java
@@ -25,14 +25,11 @@ import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashSet;
import java.util.Set;
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.io.PathUtils;
-
/**
* Checks that all tests in a directory are named according to our naming conventions. This is important because tests that do not follow
* our conventions aren't run by gradle. This was once a glorious unit test but now that Elasticsearch is a multi-module project it must be
@@ -46,30 +43,37 @@ import org.elasticsearch.common.io.PathUtils;
* {@code --self-test} that is only run in the test:framework project.
*/
public class NamingConventionsCheck {
- public static void main(String[] args) throws IOException, ClassNotFoundException {
- NamingConventionsCheck check = new NamingConventionsCheck();
+ public static void main(String[] args) throws IOException {
+ Class<?> testClass = null;
+ Class<?> integTestClass = null;
+ Path rootPath = null;
boolean skipIntegTestsInDisguise = false;
boolean selfTest = false;
- int i = 0;
- while (true) {
- switch (args[i]) {
- case "--skip-integ-tests-in-disguise":
- skipIntegTestsInDisguise = true;
- i++;
- continue;
- case "--self-test":
- selfTest = true;
- i++;
- continue;
- case "--":
- i++;
- break;
- default:
- fail("Expected -- before a path.");
+ for (int i = 0; i < args.length; i++) {
+ String arg = args[i];
+ switch (arg) {
+ case "--test-class":
+ testClass = loadClassWithoutInitializing(args[++i]);
+ break;
+ case "--integ-test-class":
+ integTestClass = loadClassWithoutInitializing(args[++i]);
+ break;
+ case "--skip-integ-tests-in-disguise":
+ skipIntegTestsInDisguise = true;
+ break;
+ case "--self-test":
+ selfTest = true;
+ break;
+ case "--":
+ rootPath = Paths.get(args[++i]);
+ break;
+ default:
+ fail("unsupported argument '" + arg + "'");
}
- break;
}
- check.check(PathUtils.get(args[i]));
+
+ NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass);
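+ // Illustrative invocation, as assembled by NamingConventionsTask:
+ // java -cp <tool and test classpath> org.elasticsearch.test.NamingConventionsCheck \
+ // --test-class org.apache.lucene.util.LuceneTestCase \
+ // --integ-test-class org.elasticsearch.test.ESIntegTestCase -- <test classes dir>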
+ check.check(rootPath, skipIntegTestsInDisguise);
if (selfTest) {
assertViolation("WrongName", check.missingSuffix);
@@ -82,17 +86,15 @@ public class NamingConventionsCheck {
}
// Now we should have no violations
- assertNoViolations("Not all subclasses of " + ESTestCase.class.getSimpleName()
+ assertNoViolations("Not all subclasses of " + check.testClass.getSimpleName()
+ " match the naming convention. Concrete classes must end with [Tests]", check.missingSuffix);
assertNoViolations("Classes ending with [Tests] are abstract or interfaces", check.notRunnable);
assertNoViolations("Found inner classes that are tests, which are excluded from the test runner", check.innerClasses);
- String classesToSubclass = String.join(",", ESTestCase.class.getSimpleName(), ESTestCase.class.getSimpleName(),
- ESTokenStreamTestCase.class.getSimpleName(), LuceneTestCase.class.getSimpleName());
- assertNoViolations("Pure Unit-Test found must subclass one of [" + classesToSubclass + "]", check.pureUnitTest);
- assertNoViolations("Classes ending with [Tests] must subclass [" + classesToSubclass + "]", check.notImplementing);
- if (!skipIntegTestsInDisguise) {
- assertNoViolations("Subclasses of ESIntegTestCase should end with IT as they are integration tests",
- check.integTestsInDisguise);
+ assertNoViolations("Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", check.pureUnitTest);
+ assertNoViolations("Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", check.notImplementing);
+ if (skipIntegTestsInDisguise == false) {
+ assertNoViolations("Subclasses of " + check.integTestClass.getSimpleName() +
+ " should end with IT as they are integration tests", check.integTestsInDisguise);
}
}
@@ -103,7 +105,15 @@ public class NamingConventionsCheck {
private final Set<Class<?>> notRunnable = new HashSet<>();
private final Set<Class<?>> innerClasses = new HashSet<>();
- public void check(Path rootPath) throws IOException {
+ private final Class<?> testClass;
+ private final Class<?> integTestClass;
+
+ public NamingConventionsCheck(Class<?> testClass, Class<?> integTestClass) {
+ this.testClass = testClass;
+ this.integTestClass = integTestClass;
+ }
+
+ public void check(Path rootPath, boolean skipTestsInDisguised) throws IOException {
Files.walkFileTree(rootPath, new FileVisitor<Path>() {
/**
* The package name of the directory we are currently visiting. Kept as a string rather than something fancy because we load
@@ -136,9 +146,9 @@ public class NamingConventionsCheck {
String filename = file.getFileName().toString();
if (filename.endsWith(".class")) {
String className = filename.substring(0, filename.length() - ".class".length());
- Class<?> clazz = loadClass(className);
+ Class<?> clazz = loadClassWithoutInitializing(packageName + className);
if (clazz.getName().endsWith("Tests")) {
- if (ESIntegTestCase.class.isAssignableFrom(clazz)) {
+ if (skipTestsInDisguised == false && integTestClass.isAssignableFrom(clazz)) {
integTestsInDisguise.add(clazz);
}
if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
@@ -164,15 +174,7 @@ public class NamingConventionsCheck {
}
private boolean isTestCase(Class<?> clazz) {
- return LuceneTestCase.class.isAssignableFrom(clazz);
- }
-
- private Class<?> loadClass(String className) {
- try {
- return Thread.currentThread().getContextClassLoader().loadClass(packageName + className);
- } catch (ClassNotFoundException e) {
- throw new RuntimeException(e);
- }
+ return testClass.isAssignableFrom(clazz);
}
@Override
@@ -186,7 +188,6 @@ public class NamingConventionsCheck {
* Fail the process if there are any violations in the set. Named to look like a junit assertion even though it isn't because it is
* similar enough.
*/
- @SuppressForbidden(reason = "System.err/System.exit")
private static void assertNoViolations(String message, Set<Class<?>> set) {
if (false == set.isEmpty()) {
System.err.println(message + ":");
@@ -201,10 +202,9 @@ public class NamingConventionsCheck {
* Fail the process if we didn't detect a particular violation. Named to look like a junit assertion even though it isn't because it is
* similar enough.
*/
- @SuppressForbidden(reason = "System.err/System.exit")
- private static void assertViolation(String className, Set<Class<?>> set) throws ClassNotFoundException {
- className = "org.elasticsearch.test.test.NamingConventionsCheckBadClasses$" + className;
- if (false == set.remove(Class.forName(className))) {
+ private static void assertViolation(String className, Set<Class<?>> set) {
+ className = "org.elasticsearch.test.NamingConventionsCheckBadClasses$" + className;
+ if (false == set.remove(loadClassWithoutInitializing(className))) {
System.err.println("Error in NamingConventionsCheck! Expected [" + className + "] to be a violation but wasn't.");
System.exit(1);
}
@@ -213,9 +213,20 @@ public class NamingConventionsCheck {
/**
* Fail the process with the provided message.
*/
- @SuppressForbidden(reason = "System.err/System.exit")
private static void fail(String reason) {
System.err.println(reason);
System.exit(1);
}
+
+ static Class<?> loadClassWithoutInitializing(String name) {
+ try {
+ return Class.forName(name,
+ // Don't initialize the class to save time. Not needed for this test and this doesn't share a VM with any other tests.
+ false,
+ // Use our classloader rather than the bootstrap class loader.
+ NamingConventionsCheck.class.getClassLoader());
+ } catch (ClassNotFoundException e) {
+ throw new RuntimeException(e);
+ }
+ }
}
diff --git a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties
similarity index 71%
rename from core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
rename to buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties
index 4487d7c8de9..fb264ff4fd0 100644
--- a/core/src/test/resources/org/elasticsearch/nodesinfo/node2/dummy/_site/es-plugin.properties
+++ b/buildSrc/src/main/resources/META-INF/gradle-plugins/elasticsearch.docs-test.properties
@@ -1,10 +1,10 @@
-################################################################
+#
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License.
+# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
@@ -15,7 +15,6 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-################################################################
-description=This is a description for a dummy test site plugin.
-version=0.0.7-BOND-SITE
+#
+implementation-class=org.elasticsearch.gradle.doc.DocsTestPlugin
diff --git a/buildSrc/src/main/resources/beat.wav b/buildSrc/src/main/resources/beat.wav
deleted file mode 100644
index 4083a4ce618..00000000000
Binary files a/buildSrc/src/main/resources/beat.wav and /dev/null differ
diff --git a/buildSrc/src/main/resources/checkstyle.xml b/buildSrc/src/main/resources/checkstyle.xml
index de47736913f..706ef46ffa1 100644
--- a/buildSrc/src/main/resources/checkstyle.xml
+++ b/buildSrc/src/main/resources/checkstyle.xml
index 5f67242c265..9acfc0f084e 100644
--- a/buildSrc/src/main/resources/deb/postinst.ftl
+++ b/buildSrc/src/main/resources/deb/postinst.ftl
@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
diff --git a/buildSrc/src/main/resources/deb/preinst.ftl b/buildSrc/src/main/resources/deb/preinst.ftl
index 5f67242c265..9acfc0f084e 100644
--- a/buildSrc/src/main/resources/deb/preinst.ftl
+++ b/buildSrc/src/main/resources/deb/preinst.ftl
@@ -1,2 +1,2 @@
-#!/bin/sh -e
+#!/bin/bash -e
<% commands.each {command -> %><%= command %><% } %>
diff --git a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt
index 0e5ce884d9d..37f03f4c91c 100644
--- a/buildSrc/src/main/resources/forbidden/es-all-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/es-all-signatures.txt
@@ -32,4 +32,7 @@ org.apache.lucene.index.IndexReader#getCombinedCoreAndDeletesKey()
@defaultMessage Soon to be removed
org.apache.lucene.document.FieldType#numericType()
-org.apache.lucene.document.InetAddressPoint#newPrefixQuery(java.lang.String, java.net.InetAddress, int) @LUCENE-7232
+@defaultMessage Don't use MethodHandles in slow ways, don't be lenient in tests.
+java.lang.invoke.MethodHandle#invoke(java.lang.Object[])
+java.lang.invoke.MethodHandle#invokeWithArguments(java.lang.Object[])
+java.lang.invoke.MethodHandle#invokeWithArguments(java.util.List)
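
The entries above forbid the generic MethodHandle entry points because they box every argument into an Object[] and re-adapt the handle's type on each call. A minimal standalone sketch of the contrast the rule pushes toward (the class name is made up; this is not part of the patch):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class MethodHandleSpeedDemo {
    public static void main(String[] args) throws Throwable {
        MethodHandle concat = MethodHandles.lookup().findVirtual(
                String.class, "concat", MethodType.methodType(String.class, String.class));

        // Forbidden style: boxes the arguments into an Object[] and performs
        // an asType adaptation on every single invocation.
        String slow = (String) concat.invokeWithArguments("foo", "bar");

        // Preferred style: invokeExact requires the call site to match the
        // handle's type exactly, letting the JIT compile a direct call.
        String fast = (String) concat.invokeExact("foo", "bar");

        System.out.println(slow + " " + fast);
    }
}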
diff --git a/buildSrc/src/main/resources/forbidden/es-test-signatures.txt b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt
index bd6744ee05f..08e591e1cfa 100644
--- a/buildSrc/src/main/resources/forbidden/es-test-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/es-test-signatures.txt
@@ -21,5 +21,7 @@ com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded r
org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead
org.apache.lucene.util.LuceneTestCase$Slow @ Don't write slow tests
org.junit.Ignore @ Use AwaitsFix instead
+org.apache.lucene.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point!
+com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point!
org.junit.Test @defaultMessage Just name your test method testFooBar
diff --git a/test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java b/buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java
similarity index 59%
rename from test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java
rename to buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java
index 233e9fe5975..4fc88b3afc5 100644
--- a/test/framework/src/test/java/org/elasticsearch/test/test/NamingConventionsCheckBadClasses.java
+++ b/buildSrc/src/test/java/org/elasticsearch/test/NamingConventionsCheckBadClasses.java
@@ -17,9 +17,7 @@
* under the License.
*/
-package org.elasticsearch.test.test;
-
-import org.elasticsearch.test.ESTestCase;
+package org.elasticsearch.test;
import junit.framework.TestCase;
@@ -30,21 +28,35 @@ public class NamingConventionsCheckBadClasses {
public static final class NotImplementingTests {
}
- public static final class WrongName extends ESTestCase {
+ public static final class WrongName extends UnitTestCase {
+ /*
+ * Dummy test so the tests pass. We do this *and* skip the tests so anyone who jumps back to a branch without these tests can still
+ * compile without a failure. That is because clean doesn't actually clean these....
+ */
+ public void testDummy() {}
}
- public static abstract class DummyAbstractTests extends ESTestCase {
+ public abstract static class DummyAbstractTests extends UnitTestCase {
}
public interface DummyInterfaceTests {
}
- public static final class InnerTests extends ESTestCase {
+ public static final class InnerTests extends UnitTestCase {
+ public void testDummy() {}
}
- public static final class WrongNameTheSecond extends ESTestCase {
+ public static final class WrongNameTheSecond extends UnitTestCase {
+ public void testDummy() {}
}
public static final class PlainUnit extends TestCase {
+ public void testDummy() {}
+ }
+
+ public abstract static class UnitTestCase extends TestCase {
+ }
+
+ public abstract static class IntegTestCase extends UnitTestCase {
}
}
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 0f6a09327d6..7565488d4ab 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
-elasticsearch = 5.0.0-alpha2
-lucene = 6.0.0
+elasticsearch = 5.0.0-alpha5
+lucene = 6.1.0
# optional dependencies
spatial4j = 0.6
@@ -7,15 +7,16 @@ jts = 1.13
jackson = 2.7.1
log4j = 1.2.17
slf4j = 1.6.2
-jna = 4.1.0
-
+jna = 4.2.2
# test dependencies
randomizedrunner = 2.3.2
junit = 4.11
-# TODO: Upgrade httpclient to a version > 4.5.1 once released. Then remove o.e.test.rest.client.StrictHostnameVerifier* and use
-# DefaultHostnameVerifier instead since we no longer need to workaround https://issues.apache.org/jira/browse/HTTPCLIENT-1698
-httpclient = 4.3.6
-httpcore = 4.3.3
+httpclient = 4.5.2
+httpcore = 4.4.4
commonslogging = 1.1.3
commonscodec = 1.10
+hamcrest = 1.3
+securemock = 1.2
+# benchmark dependencies
+jmh = 1.12
diff --git a/client/rest/build.gradle b/client/rest/build.gradle
new file mode 100644
index 00000000000..4623fbd8c2c
--- /dev/null
+++ b/client/rest/build.gradle
@@ -0,0 +1,80 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+import org.gradle.api.JavaVersion
+
+apply plugin: 'elasticsearch.build'
+apply plugin: 'ru.vyarus.animalsniffer'
+
+targetCompatibility = JavaVersion.VERSION_1_7
+sourceCompatibility = JavaVersion.VERSION_1_7
+
+group = 'org.elasticsearch.client'
+
+dependencies {
+ compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
+ compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+ compile "commons-codec:commons-codec:${versions.commonscodec}"
+ compile "commons-logging:commons-logging:${versions.commonslogging}"
+
+ testCompile "org.elasticsearch.client:test:${version}"
+ testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+ testCompile "junit:junit:${versions.junit}"
+ testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
+ testCompile "org.elasticsearch:securemock:${versions.securemock}"
+ testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
+ signature "org.codehaus.mojo.signature:java17:1.0@signature"
+}
+
+forbiddenApisMain {
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+forbiddenApisTest {
+ //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+//JarHell is part of es core, which we don't want to pull in
+jarHell.enabled=false
+
+namingConventions {
+ testClass = 'org.elasticsearch.client.RestClientTestCase'
+ //we don't have integration tests
+ skipIntegTestInDisguise = true
+}
+
+thirdPartyAudit.excludes = [
+ //commons-logging optional dependencies
+ 'org.apache.avalon.framework.logger.Logger',
+ 'org.apache.log.Hierarchy',
+ 'org.apache.log.Logger',
+ 'org.apache.log4j.Category',
+ 'org.apache.log4j.Level',
+ 'org.apache.log4j.Logger',
+ 'org.apache.log4j.Priority',
+ //commons-logging provided dependencies
+ 'javax.servlet.ServletContextEvent',
+ 'javax.servlet.ServletContextListener'
+]
diff --git a/plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1 b/client/rest/licenses/commons-codec-1.10.jar.sha1
similarity index 100%
rename from plugins/discovery-azure/licenses/commons-codec-1.10.jar.sha1
rename to client/rest/licenses/commons-codec-1.10.jar.sha1
diff --git a/plugins/discovery-azure/licenses/commons-codec-LICENSE.txt b/client/rest/licenses/commons-codec-LICENSE.txt
similarity index 100%
rename from plugins/discovery-azure/licenses/commons-codec-LICENSE.txt
rename to client/rest/licenses/commons-codec-LICENSE.txt
diff --git a/client/rest/licenses/commons-codec-NOTICE.txt b/client/rest/licenses/commons-codec-NOTICE.txt
new file mode 100644
index 00000000000..1da9af50f60
--- /dev/null
+++ b/client/rest/licenses/commons-codec-NOTICE.txt
@@ -0,0 +1,17 @@
+Apache Commons Codec
+Copyright 2002-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
+contains test data from http://aspell.net/test/orig/batch0.tab.
+Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+
+===============================================================================
+
+The content of package org.apache.commons.codec.language.bm has been translated
+from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+with permission from the original authors.
+Original source copyright:
+Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
diff --git a/client/rest/licenses/commons-logging-1.1.3.jar.sha1 b/client/rest/licenses/commons-logging-1.1.3.jar.sha1
new file mode 100644
index 00000000000..5b8f029e582
--- /dev/null
+++ b/client/rest/licenses/commons-logging-1.1.3.jar.sha1
@@ -0,0 +1 @@
+f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f
\ No newline at end of file
diff --git a/plugins/discovery-azure/licenses/commons-logging-LICENSE.txt b/client/rest/licenses/commons-logging-LICENSE.txt
similarity index 100%
rename from plugins/discovery-azure/licenses/commons-logging-LICENSE.txt
rename to client/rest/licenses/commons-logging-LICENSE.txt
diff --git a/client/rest/licenses/commons-logging-NOTICE.txt b/client/rest/licenses/commons-logging-NOTICE.txt
new file mode 100644
index 00000000000..556bd03951d
--- /dev/null
+++ b/client/rest/licenses/commons-logging-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache Commons Logging
+Copyright 2003-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/rest/licenses/httpclient-4.5.2.jar.sha1 b/client/rest/licenses/httpclient-4.5.2.jar.sha1
new file mode 100644
index 00000000000..6937112a09f
--- /dev/null
+++ b/client/rest/licenses/httpclient-4.5.2.jar.sha1
@@ -0,0 +1 @@
+733db77aa8d9b2d68015189df76ab06304406e50
\ No newline at end of file
diff --git a/plugins/discovery-azure/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient-LICENSE.txt
similarity index 100%
rename from plugins/discovery-azure/licenses/httpclient-LICENSE.txt
rename to client/rest/licenses/httpclient-LICENSE.txt
diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient-NOTICE.txt
new file mode 100644
index 00000000000..91e5c40c4c6
--- /dev/null
+++ b/client/rest/licenses/httpclient-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache HttpComponents Client
+Copyright 1999-2016 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/rest/licenses/httpcore-4.4.4.jar.sha1 b/client/rest/licenses/httpcore-4.4.4.jar.sha1
new file mode 100644
index 00000000000..ef0c257e012
--- /dev/null
+++ b/client/rest/licenses/httpcore-4.4.4.jar.sha1
@@ -0,0 +1 @@
+b31526a230871fbe285fbcbe2813f9c0839ae9b0
\ No newline at end of file
diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt
new file mode 100644
index 00000000000..32f01eda18f
--- /dev/null
+++ b/client/rest/licenses/httpcore-LICENSE.txt
@@ -0,0 +1,558 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+=========================================================================
+
+This project includes Public Suffix List copied from
+<https://publicsuffix.org/list/effective_tld_names.dat>
+licensed under the terms of the Mozilla Public License, v. 2.0
+
+Full license text:
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore-NOTICE.txt
new file mode 100644
index 00000000000..91e5c40c4c6
--- /dev/null
+++ b/client/rest/licenses/httpcore-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache HttpComponents Client
+Copyright 1999-2016 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java
new file mode 100644
index 00000000000..a7b222da70e
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/DeadHostState.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Holds the state of a dead connection to a host. Keeps track of how many failed attempts were performed and
+ * when the host should be retried (based on the number of previous failed attempts).
+ * The class is immutable; a new copy of it should be created each time the state has to change.
+ */
+final class DeadHostState {
+
+ private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1);
+ private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30);
+
+ static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState();
+
+ private final int failedAttempts;
+ private final long deadUntilNanos;
+
+ private DeadHostState() {
+ this.failedAttempts = 1;
+ this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS;
+ }
+
+ /**
+     * We keep track of how many times a certain node fails consecutively. The higher that number is, the longer we will wait
+     * to retry that same node again. Minimum is 1 minute (for a node that only failed once), maximum is 30 minutes (for a node
+ * that failed many consecutive times).
+ */
+ DeadHostState(DeadHostState previousDeadHostState) {
+ long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1),
+ MAX_CONNECTION_TIMEOUT_NANOS);
+ this.deadUntilNanos = System.nanoTime() + timeoutNanos;
+ this.failedAttempts = previousDeadHostState.failedAttempts + 1;
+ }
+
+ /**
+     * Returns the timestamp (in nanos) until which the host is supposed to stay dead without being retried.
+ * After that the host should be retried.
+ */
+ long getDeadUntilNanos() {
+ return deadUntilNanos;
+ }
+
+ @Override
+ public String toString() {
+ return "DeadHostState{" +
+ "failedAttempts=" + failedAttempts +
+ ", deadUntilNanos=" + deadUntilNanos +
+ '}';
+ }
+}
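
As a rough illustration of the backoff that the DeadHostState constructor above produces, here is a standalone sketch that simply replays its formula (illustrative only; the class name is made up, and note that the very first dead state pins the timeout to the one-minute minimum):

import java.util.concurrent.TimeUnit;

public class DeadHostBackoffDemo {
    public static void main(String[] args) {
        long min = TimeUnit.MINUTES.toNanos(1);
        long max = TimeUnit.MINUTES.toNanos(30);
        for (int failedAttempts = 1; failedAttempts <= 12; failedAttempts++) {
            // Same formula as DeadHostState(DeadHostState): the timeout
            // roughly doubles every two consecutive failures, capped at 30m.
            long timeout = (long) Math.min(min * 2 * Math.pow(2, failedAttempts * 0.5 - 1), max);
            System.out.println(failedAttempts + " failure(s) -> "
                    + TimeUnit.NANOSECONDS.toSeconds(timeout) + "s");
        }
    }
}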
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
similarity index 80%
rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
rename to client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
index 480fc7b2f01..df08ae5a8d1 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java
@@ -16,8 +16,9 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.test.rest.client.http;
+package org.elasticsearch.client;
+import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import java.net.URI;
@@ -25,11 +26,11 @@ import java.net.URI;
/**
* Allows to send DELETE requests providing a body (not supported out of the box)
*/
-public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
+final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
- public final static String METHOD_NAME = "DELETE";
+ static final String METHOD_NAME = HttpDelete.METHOD_NAME;
- public HttpDeleteWithEntity(final URI uri) {
+ HttpDeleteWithEntity(final URI uri) {
setURI(uri);
}
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java
similarity index 81%
rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
rename to client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java
index aa0129f4660..a3846beefe4 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
+++ b/client/rest/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java
@@ -16,20 +16,21 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.test.rest.client.http;
+package org.elasticsearch.client;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpGet;
import java.net.URI;
/**
* Allows to send GET requests providing a body (not supported out of the box)
*/
-public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
+final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
- public final static String METHOD_NAME = "GET";
+ static final String METHOD_NAME = HttpGet.METHOD_NAME;
- public HttpGetWithEntity(final URI uri) {
+ HttpGetWithEntity(final URI uri) {
setURI(uri);
}
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java
new file mode 100644
index 00000000000..24e6881fa1e
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/RequestLogger.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpResponse;
+import org.apache.http.RequestLine;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.entity.BufferedHttpEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.util.EntityUtils;
+
+import java.io.BufferedReader;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Helper class that exposes static methods to unify the way requests are logged.
+ * Includes trace logging to log complete requests and responses in curl format.
+ * Useful for debugging, manually sending logged requests via curl and checking their responses.
+ * Trace logging is a feature that all the language clients provide.
+ */
+final class RequestLogger {
+
+ private static final Log tracer = LogFactory.getLog("tracer");
+
+ private RequestLogger() {
+ }
+
+ /**
+ * Logs a request that yielded a response
+ */
+ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
+ "] returned [" + httpResponse.getStatusLine() + "]");
+ }
+ if (tracer.isTraceEnabled()) {
+ String requestLine;
+ try {
+ requestLine = buildTraceRequest(request, host);
+ } catch(IOException e) {
+ requestLine = "";
+ tracer.trace("error while reading request for trace purposes", e);
+ }
+ String responseLine;
+ try {
+ responseLine = buildTraceResponse(httpResponse);
+ } catch(IOException e) {
+ responseLine = "";
+ tracer.trace("error while reading response for trace purposes", e);
+ }
+ tracer.trace(requestLine + '\n' + responseLine);
+ }
+ }
+
+ /**
+ * Logs a request that failed
+ */
+ static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {
+ if (logger.isDebugEnabled()) {
+ logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
+ }
+ if (tracer.isTraceEnabled()) {
+ String traceRequest;
+ try {
+ traceRequest = buildTraceRequest(request, host);
+ } catch (IOException e1) {
+ tracer.trace("error while reading request for trace purposes", e);
+ traceRequest = "";
+ }
+ tracer.trace(traceRequest);
+ }
+ }
+
+ /**
+ * Creates curl output for given request
+ */
+ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException {
+ String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'";
+ if (request instanceof HttpEntityEnclosingRequest) {
+ HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
+ if (enclosingRequest.getEntity() != null) {
+ requestLine += " -d '";
+ HttpEntity entity = enclosingRequest.getEntity();
+ if (entity.isRepeatable() == false) {
+ entity = new BufferedHttpEntity(enclosingRequest.getEntity());
+ enclosingRequest.setEntity(entity);
+ }
+ requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'";
+ }
+ }
+ return requestLine;
+ }
+
+ /**
+ * Creates curl output for given response
+ */
+ static String buildTraceResponse(HttpResponse httpResponse) throws IOException {
+ String responseLine = "# " + httpResponse.getStatusLine().toString();
+ for (Header header : httpResponse.getAllHeaders()) {
+ responseLine += "\n# " + header.getName() + ": " + header.getValue();
+ }
+ responseLine += "\n#";
+ HttpEntity entity = httpResponse.getEntity();
+ if (entity != null) {
+ if (entity.isRepeatable() == false) {
+ entity = new BufferedHttpEntity(entity);
+ }
+ httpResponse.setEntity(entity);
+ ContentType contentType = ContentType.get(entity);
+ Charset charset = StandardCharsets.UTF_8;
+ if (contentType != null) {
+ charset = contentType.getCharset();
+ }
+ try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) {
+ String line;
+ while( (line = reader.readLine()) != null) {
+ responseLine += "\n# " + line;
+ }
+ }
+ }
+ return responseLine;
+ }
+
+ private static String getUri(RequestLine requestLine) {
+ if (requestLine.getUri().charAt(0) != '/') {
+ return "/" + requestLine.getUri();
+ }
+ return requestLine.getUri();
+ }
+}
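
For context, a minimal same-package sketch of the curl-format line that buildTraceRequest generates (the host and request body are made up for illustration):

package org.elasticsearch.client;

import org.apache.http.HttpHost;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

public class RequestLoggerDemo {
    public static void main(String[] args) throws Exception {
        HttpPost request = new HttpPost("/index/type/1");
        request.setEntity(new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON));
        // Prints something like:
        // curl -iX POST 'http://localhost:9200/index/type/1' -d '{"field":"value"}'
        System.out.println(RequestLogger.buildTraceRequest(request, new HttpHost("localhost", 9200)));
    }
}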
diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java
new file mode 100644
index 00000000000..f7685b27bb9
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java
@@ -0,0 +1,115 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.RequestLine;
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.CloseableHttpResponse;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with
+ * its corresponding {@link RequestLine} and {@link HttpHost}.
+ * It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool.
+ */
+public class Response implements Closeable {
+
+ private final RequestLine requestLine;
+ private final HttpHost host;
+ private final CloseableHttpResponse response;
+
+ Response(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) {
+ Objects.requireNonNull(requestLine, "requestLine cannot be null");
+ Objects.requireNonNull(host, "node cannot be null");
+ Objects.requireNonNull(response, "response cannot be null");
+ this.requestLine = requestLine;
+ this.host = host;
+ this.response = response;
+ }
+
+ /**
+ * Returns the request line that generated this response
+ */
+ public RequestLine getRequestLine() {
+ return requestLine;
+ }
+
+ /**
+ * Returns the node that returned this response
+ */
+ public HttpHost getHost() {
+ return host;
+ }
+
+ /**
+ * Returns the status line of the current response
+ */
+ public StatusLine getStatusLine() {
+ return response.getStatusLine();
+ }
+
+ /**
+ * Returns all the response headers
+ */
+ public Header[] getHeaders() {
+ return response.getAllHeaders();
+ }
+
+ /**
+ * Returns the value of the first header with a specified name of this message.
+ * If there is more than one matching header in the message the first element is returned.
+     * If there is no matching header in the message null is returned.
+ */
+ public String getHeader(String name) {
+ Header header = response.getFirstHeader(name);
+ if (header == null) {
+ return null;
+ }
+ return header.getValue();
+ }
+
+ /**
+     * Returns the response body if available, null otherwise
+ * @see HttpEntity
+ */
+ public HttpEntity getEntity() {
+ return response.getEntity();
+ }
+
+ @Override
+ public String toString() {
+ return "Response{" +
+ "requestLine=" + requestLine +
+ ", host=" + host +
+ ", response=" + response.getStatusLine() +
+ '}';
+ }
+
+ @Override
+ public void close() throws IOException {
+ this.response.close();
+ }
+}
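
A minimal sketch of consuming a Response, assuming the performRequest(String, String, Map, HttpEntity, Header...) signature described in RestClient below and that a null entity is accepted for body-less requests (the class and endpoint are illustrative):

package org.elasticsearch.client;

import java.util.Collections;

import org.apache.http.util.EntityUtils;

public class ResponseDemo {
    // Sketch: read the status line and body, then close the Response to
    // return its connection to the pool (Response implements Closeable).
    static String fetchRoot(RestClient client) throws Exception {
        try (Response response = client.performRequest("GET", "/",
                Collections.<String, String>emptyMap(), null)) {
            System.out.println(response.getStatusLine());
            // EntityUtils fully consumes the entity into a String.
            return EntityUtils.toString(response.getEntity());
        }
    }
}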
diff --git a/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
new file mode 100644
index 00000000000..44f59cce7db
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/ResponseException.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import java.io.IOException;
+
+/**
+ * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error.
+ * Note that the response body is read eagerly and passed in as a string, which means that the Response object
+ * is expected to be closed and remains available only for reading metadata such as the status line, request line, and response headers.
+ */
+public class ResponseException extends IOException {
+
+ private Response response;
+ private final String responseBody;
+
+ ResponseException(Response response, String responseBody) throws IOException {
+ super(buildMessage(response,responseBody));
+ this.response = response;
+ this.responseBody = responseBody;
+ }
+
+ private static String buildMessage(Response response, String responseBody) {
+ String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
+ + ": " + response.getStatusLine().toString();
+ if (responseBody != null) {
+ message += "\n" + responseBody;
+ }
+ return message;
+ }
+
+ /**
+ * Returns the {@link Response} that caused this exception to be thrown.
+ * Expected to be used only to read metadata like status line, request line, response headers. The response body should
+ * be retrieved using {@link #getResponseBody()}
+ */
+ public Response getResponse() {
+ return response;
+ }
+
+ /**
+ * Returns the response body as a string or null if there wasn't any.
+     * The body is eagerly consumed when a ResponseException gets created, and its corresponding Response
+     * gets closed straightaway, so this method is the only way to get back the response body that was returned.
+ */
+ public String getResponseBody() {
+ return responseBody;
+ }
+}
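
And a hedged sketch of the error-handling pattern this exception enables (same assumptions as the previous sketch; the class name and endpoint are illustrative):

package org.elasticsearch.client;

import java.util.Collections;

public class ResponseExceptionDemo {
    static void logIfError(RestClient client) throws Exception {
        try {
            client.performRequest("GET", "/_does_not_exist",
                    Collections.<String, String>emptyMap(), null).close();
        } catch (ResponseException e) {
            // The underlying Response is already closed at this point; only
            // its metadata and the eagerly read body remain accessible.
            System.err.println(e.getResponse().getStatusLine());
            System.err.println(e.getResponseBody());
        }
    }
}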
diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClient.java b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
new file mode 100644
index 00000000000..e3bb1b3c507
--- /dev/null
+++ b/client/rest/src/main/java/org/elasticsearch/client/RestClient.java
@@ -0,0 +1,508 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.Consts;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpRequest;
+import org.apache.http.client.ClientProtocolException;
+import org.apache.http.client.config.RequestConfig;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpOptions;
+import org.apache.http.client.methods.HttpPatch;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.methods.HttpTrace;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.config.Registry;
+import org.apache.http.conn.socket.ConnectionSocketFactory;
+import org.apache.http.entity.ContentType;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
+import org.apache.http.util.EntityUtils;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * Client that connects to an elasticsearch cluster through http.
+ * Must be created using {@link Builder}, which allows setting all the different options, or just relying on defaults.
+ * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
+ * by calling {@link #setHosts(HttpHost...)}.
+ * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} sends a request to the cluster. When
+ * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
+ * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
+ * failed (the more failures, the later they will be retried). In case of failures, all of the alive nodes (or dead nodes that
+ * deserve a retry) are retried until one responds or all of them fail, in which case an {@link IOException} will be thrown.
+ *
+ * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
+ */
+public final class RestClient implements Closeable {
+
+ private static final Log logger = LogFactory.getLog(RestClient.class);
+ public static final ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8);
+
+ private final CloseableHttpClient client;
+ //we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy
+ //when we create the HttpClient instance on our own as there would be two different ways to set the default headers.
+ private final Header[] defaultHeaders;
+ private final long maxRetryTimeoutMillis;
+ private final AtomicInteger lastHostIndex = new AtomicInteger(0);
+ private volatile Set<HttpHost> hosts;
+ private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
+ private final FailureListener failureListener;
+
+ private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
+ HttpHost[] hosts, FailureListener failureListener) {
+ this.client = client;
+ this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
+ this.defaultHeaders = defaultHeaders;
+ this.failureListener = failureListener;
+ setHosts(hosts);
+ }
+
+ /**
+ * Replaces the hosts that the client communicates with.
+ * @see HttpHost
+ */
+ public synchronized void setHosts(HttpHost... hosts) {
+ if (hosts == null || hosts.length == 0) {
+ throw new IllegalArgumentException("hosts must not be null nor empty");
+ }
+ Set<HttpHost> httpHosts = new HashSet<>();
+ for (HttpHost host : hosts) {
+ Objects.requireNonNull(host, "host cannot be null");
+ httpHosts.add(host);
+ }
+ this.hosts = Collections.unmodifiableSet(httpHosts);
+ this.blacklist.clear();
+ }
+
+ /**
+ * Sends a request to the elasticsearch cluster that the current client points to.
+ * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters and request body.
+ *
+ * @param method the http method
+ * @param endpoint the path of the request (without host and port)
+ * @param headers the optional request headers
+ * @return the response returned by elasticsearch
+ * @throws IOException in case of a problem or the connection was aborted
+ * @throws ClientProtocolException in case of an http protocol error
+ * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ */
+ public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
+ return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
+ }
+
+ /**
+ * Sends a request to the elasticsearch cluster that the current client points to.
+ * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
+ *
+ * @param method the http method
+ * @param endpoint the path of the request (without host and port)
+ * @param params the query_string parameters
+ * @param headers the optional request headers
+ * @return the response returned by elasticsearch
+ * @throws IOException in case of a problem or the connection was aborted
+ * @throws ClientProtocolException in case of an http protocol error
+ * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ */
+ public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
+ return performRequest(method, endpoint, params, null, headers);
+ }
+
+ /**
+ * Sends a request to the elasticsearch cluster that the current client points to.
+ * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
+ * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
+ * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
+ * till one responds or none of them does, in which case an {@link IOException} will be thrown.
+ *
+ * @param method the http method
+ * @param endpoint the path of the request (without host and port)
+ * @param params the query_string parameters
+ * @param entity the body of the request, null if not applicable
+ * @param headers the optional request headers
+ * @return the response returned by elasticsearch
+ * @throws IOException in case of a problem or the connection was aborted
+ * @throws ClientProtocolException in case of an http protocol error
+ * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
+ */
+ public Response performRequest(String method, String endpoint, Map<String, String> params,
+ HttpEntity entity, Header... headers) throws IOException {
+ URI uri = buildUri(endpoint, params);
+ HttpRequestBase request = createHttpRequest(method, uri, entity);
+ setHeaders(request, headers);
+ //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt
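+ //e.g. with the default max retry timeout of 10000ms, retries are allowed for up to round(10000 / 100f * 98) = 9800ms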
+ long retryTimeoutMillis = Math.round(this.maxRetryTimeoutMillis / (float)100 * 98);
+ IOException lastSeenException = null;
+ long startTime = System.nanoTime();
+ for (HttpHost host : nextHost()) {
+ if (lastSeenException != null) {
+ //in case we are retrying, check whether maxRetryTimeout has been reached, in which case an exception will be thrown
+ long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
+ long timeout = retryTimeoutMillis - timeElapsedMillis;
+ if (timeout <= 0) {
+ IOException retryTimeoutException = new IOException(
+ "request retries exceeded max retry timeout [" + retryTimeoutMillis + "]");
+ retryTimeoutException.addSuppressed(lastSeenException);
+ throw retryTimeoutException;
+ }
+ //also reset the request to make it reusable for the next attempt
+ request.reset();
+ }
+
+ CloseableHttpResponse httpResponse;
+ try {
+ httpResponse = client.execute(host, request);
+ } catch(IOException e) {
+ RequestLogger.logFailedRequest(logger, request, host, e);
+ onFailure(host);
+ lastSeenException = addSuppressedException(lastSeenException, e);
+ continue;
+ }
+ Response response = new Response(request.getRequestLine(), host, httpResponse);
+ int statusCode = response.getStatusLine().getStatusCode();
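+ //anything below 300 is a success; 404 on HEAD is also accepted, as HEAD is commonly used for exists-style checks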
+ if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404)) {
+ RequestLogger.logResponse(logger, request, host, httpResponse);
+ onResponse(host);
+ return response;
+ }
+ RequestLogger.logResponse(logger, request, host, httpResponse);
+ String responseBody;
+ try {
+ if (response.getEntity() == null) {
+ responseBody = null;
+ } else {
+ responseBody = EntityUtils.toString(response.getEntity());
+ }
+ } finally {
+ response.close();
+ }
+ lastSeenException = addSuppressedException(lastSeenException, new ResponseException(response, responseBody));
+ switch(statusCode) {
+ case 502:
+ case 503:
+ case 504:
+ //mark host dead and retry against next one
+ onFailure(host);
+ break;
+ default:
+ //mark host alive and don't retry, as the error should be a request problem
+ onResponse(host);
+ throw lastSeenException;
+ }
+ }
+ //we get here only when we tried all nodes and they all failed
+ assert lastSeenException != null;
+ throw lastSeenException;
+ }
+
+ private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
+ Objects.requireNonNull(requestHeaders, "request headers must not be null");
+ for (Header defaultHeader : defaultHeaders) {
+ httpRequest.setHeader(defaultHeader);
+ }
+ for (Header requestHeader : requestHeaders) {
+ Objects.requireNonNull(requestHeader, "request header must not be null");
+ httpRequest.setHeader(requestHeader);
+ }
+ }
+
+ /**
+ * Returns an iterator of hosts to be used for a request call.
+ * Ideally, the first host is retrieved from the iterator and used successfully for the request.
+ * Otherwise, after each failure the next host should be retrieved from the iterator so that the request can be retried until
+ * the iterator is exhausted. The maximum total number of attempts is equal to the number of hosts available in the iterator.
+ * The returned iterator is never empty; rather, an {@link IllegalStateException} is thrown in case there are no hosts.
+ * In case there are no healthy hosts available, nor dead ones to be retried, one dead host gets returned.
+ */
+ private Iterable<HttpHost> nextHost() {
+ Set filteredHosts = new HashSet<>(hosts);
+ for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
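+ //the dead-until deadline is still in the future: the host stays blacklisted and is filtered out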
+ if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
+ filteredHosts.remove(entry.getKey());
+ }
+ }
+
+ if (filteredHosts.isEmpty()) {
+ //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
+ List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
+ Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
+ @Override
+ public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
+ return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
+ }
+ });
+ HttpHost deadHost = sortedHosts.get(0).getKey();
+ logger.trace("resurrecting host [" + deadHost + "]");
+ return Collections.singleton(deadHost);
+ }
+
+ List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
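+ //e.g. with hosts [a, b, c] and lastHostIndex 1, the rotate distance is 2 and the list becomes [b, c, a]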
+ Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
+ return rotatedHosts;
+ }
+
+ /**
+ * Called after each successful request call.
+ * Receives as an argument the host that was used for the successful request.
+ */
+ private void onResponse(HttpHost host) {
+ DeadHostState removedHost = this.blacklist.remove(host);
+ if (logger.isDebugEnabled() && removedHost != null) {
+ logger.debug("removed host [" + host + "] from blacklist");
+ }
+ }
+
+ /**
+ * Called after each failed attempt.
+ * Receives as an argument the host that was used for the failed attempt.
+ */
+ private void onFailure(HttpHost host) throws IOException {
+ while(true) {
+ DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, DeadHostState.INITIAL_DEAD_STATE);
+ if (previousDeadHostState == null) {
+ logger.debug("added host [" + host + "] to blacklist");
+ break;
+ }
+ if (blacklist.replace(host, previousDeadHostState, new DeadHostState(previousDeadHostState))) {
+ logger.debug("updated host [" + host + "] already in blacklist");
+ break;
+ }
+ }
+ failureListener.onFailure(host);
+ }
+
+ @Override
+ public void close() throws IOException {
+ client.close();
+ }
+
+ private static IOException addSuppressedException(IOException suppressedException, IOException currentException) {
+ if (suppressedException != null) {
+ currentException.addSuppressed(suppressedException);
+ }
+ return currentException;
+ }
+
+ private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) {
+ switch(method.toUpperCase(Locale.ROOT)) {
+ case HttpDeleteWithEntity.METHOD_NAME:
+ return addRequestBody(new HttpDeleteWithEntity(uri), entity);
+ case HttpGetWithEntity.METHOD_NAME:
+ return addRequestBody(new HttpGetWithEntity(uri), entity);
+ case HttpHead.METHOD_NAME:
+ return addRequestBody(new HttpHead(uri), entity);
+ case HttpOptions.METHOD_NAME:
+ return addRequestBody(new HttpOptions(uri), entity);
+ case HttpPatch.METHOD_NAME:
+ return addRequestBody(new HttpPatch(uri), entity);
+ case HttpPost.METHOD_NAME:
+ HttpPost httpPost = new HttpPost(uri);
+ addRequestBody(httpPost, entity);
+ return httpPost;
+ case HttpPut.METHOD_NAME:
+ return addRequestBody(new HttpPut(uri), entity);
+ case HttpTrace.METHOD_NAME:
+ return addRequestBody(new HttpTrace(uri), entity);
+ default:
+ throw new UnsupportedOperationException("http method not supported: " + method);
+ }
+ }
+
+ private static HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) {
+ if (entity != null) {
+ if (httpRequest instanceof HttpEntityEnclosingRequestBase) {
+ ((HttpEntityEnclosingRequestBase)httpRequest).setEntity(entity);
+ } else {
+ throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported");
+ }
+ }
+ return httpRequest;
+ }
+
+ private static URI buildUri(String path, Map<String, String> params) {
+ Objects.requireNonNull(params, "params must not be null");
+ try {
+ URIBuilder uriBuilder = new URIBuilder(path);
+ for (Map.Entry<String, String> param : params.entrySet()) {
+ uriBuilder.addParameter(param.getKey(), param.getValue());
+ }
+ return uriBuilder.build();
+ } catch(URISyntaxException e) {
+ throw new IllegalArgumentException(e.getMessage(), e);
+ }
+ }
+
+ /**
+ * Returns a new {@link Builder} to help with {@link RestClient} creation.
+ */
+ public static Builder builder(HttpHost... hosts) {
+ return new Builder(hosts);
+ }
+
+ /**
+ * Rest client builder. Helps creating a new {@link RestClient}.
+ */
+ public static final class Builder {
+ public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
+ public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
+ public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
+ public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
+
+ private static final Header[] EMPTY_HEADERS = new Header[0];
+
+ private final HttpHost[] hosts;
+ private CloseableHttpClient httpClient;
+ private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
+ private Header[] defaultHeaders = EMPTY_HEADERS;
+ private FailureListener failureListener;
+
+ /**
+ * Creates a new builder instance and sets the hosts that the client will send requests to.
+ */
+ private Builder(HttpHost... hosts) {
+ if (hosts == null || hosts.length == 0) {
+ throw new IllegalArgumentException("no hosts provided");
+ }
+ this.hosts = hosts;
+ }
+
+ /**
+ * Sets the http client. If not specified, a new default one will be
+ * created by calling {@link #createDefaultHttpClient(Registry)}.
+ *
+ * @see CloseableHttpClient
+ */
+ public Builder setHttpClient(CloseableHttpClient httpClient) {
+ this.httpClient = httpClient;
+ return this;
+ }
+
+ /**
+ * Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
+ * {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
+ *
+ * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
+ */
+ public Builder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
+ if (maxRetryTimeoutMillis <= 0) {
+ throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0");
+ }
+ this.maxRetryTimeout = maxRetryTimeoutMillis;
+ return this;
+ }
+
+ /**
+ * Sets the default request headers, to be used when creating the default http client instance.
+ * In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be
+ * set to it externally during http client construction.
+ */
+ public Builder setDefaultHeaders(Header[] defaultHeaders) {
+ Objects.requireNonNull(defaultHeaders, "default headers must not be null");
+ for (Header defaultHeader : defaultHeaders) {
+ Objects.requireNonNull(defaultHeader, "default header must not be null");
+ }
+ this.defaultHeaders = defaultHeaders;
+ return this;
+ }
+
+ /**
+ * Sets the {@link FailureListener} to be notified for each request failure
+ */
+ public Builder setFailureListener(FailureListener failureListener) {
+ Objects.requireNonNull(failureListener, "failure listener must not be null");
+ this.failureListener = failureListener;
+ return this;
+ }
+
+ /**
+ * Creates a new {@link RestClient} based on the provided configuration.
+ */
+ public RestClient build() {
+ if (httpClient == null) {
+ httpClient = createDefaultHttpClient(null);
+ }
+ if (failureListener == null) {
+ failureListener = new FailureListener();
+ }
+ return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
+ }
+
+ /**
+ * Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
+ *
+ * @see CloseableHttpClient
+ */
+ public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
+ PoolingHttpClientConnectionManager connectionManager;
+ if (socketFactoryRegistry == null) {
+ connectionManager = new PoolingHttpClientConnectionManager();
+ } else {
+ connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
+ }
+ //default settings may be too constraining
+ connectionManager.setDefaultMaxPerRoute(10);
+ connectionManager.setMaxTotal(30);
+
+ //default timeouts are all infinite
+ RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
+ .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
+ .setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
+ return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
+ }
+ }
+
+ /**
+ * Listener that gets notified whenever a failure happens. Useful when sniffing is enabled, so that we can sniff on failure.
+ * The default implementation is a no-op.
+ */
+ public static class FailureListener {
+ /**
+ * Notifies that the host provided as argument has just failed
+ */
+ public void onFailure(HttpHost host) throws IOException {
+
+ }
+ }
+}
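To illustrate the builder API above, a minimal usage sketch (not part of the patch; host addresses, the header and the listener body are assumptions):

    //sketch assumes a surrounding method that declares throws IOException
    RestClient restClient = RestClient.builder(
            new HttpHost("localhost", 9200), new HttpHost("localhost", 9201))
            .setDefaultHeaders(new Header[]{new BasicHeader("X-App-Name", "my-app")})
            .setMaxRetryTimeoutMillis(30000)
            .setFailureListener(new RestClient.FailureListener() {
                @Override
                public void onFailure(HttpHost host) {
                    //the default implementation is a no-op; e.g. trigger sniffing here
                }
            })
            .build();
    try {
        Response response = restClient.performRequest("GET", "/");
    } finally {
        restClient.close();
    }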
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java b/client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
similarity index 57%
rename from modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java
rename to client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
index 38d48b98f4e..dd866bac541 100644
--- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java
+++ b/client/rest/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java
@@ -16,26 +16,27 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.script.mustache;
-import com.fasterxml.jackson.core.io.JsonStringEncoder;
-import com.github.mustachejava.DefaultMustacheFactory;
-import com.github.mustachejava.MustacheException;
+package org.elasticsearch.client;
+
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.message.BasicHttpResponse;
import java.io.IOException;
-import java.io.Writer;
/**
- * A MustacheFactory that does simple JSON escaping.
+ * Simple {@link CloseableHttpResponse} implementation needed to easily create closeable http responses, given that
+ * org.apache.http.impl.execchain.HttpResponseProxy is not public.
*/
-final class JsonEscapingMustacheFactory extends DefaultMustacheFactory {
+class CloseableBasicHttpResponse extends BasicHttpResponse implements CloseableHttpResponse {
+
+ public CloseableBasicHttpResponse(StatusLine statusline) {
+ super(statusline);
+ }
@Override
- public void encode(String value, Writer writer) {
- try {
- writer.write(JsonStringEncoder.getInstance().quoteAsString(value));
- } catch (IOException e) {
- throw new MustacheException("Failed to encode value: " + value);
- }
+ public void close() throws IOException {
+ //nothing to close
}
-}
+}
\ No newline at end of file
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java
new file mode 100644
index 00000000000..4d3ad75b5e8
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RequestLoggerTests.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpHost;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpOptions;
+import org.apache.http.client.methods.HttpPatch;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpRequestBase;
+import org.apache.http.client.methods.HttpTrace;
+import org.apache.http.entity.InputStreamEntity;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHttpResponse;
+import org.apache.http.message.BasicStatusLine;
+import org.apache.http.util.EntityUtils;
+
+import java.io.ByteArrayInputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.nio.charset.StandardCharsets;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertThat;
+
+public class RequestLoggerTests extends RestClientTestCase {
+
+ public void testTraceRequest() throws IOException, URISyntaxException {
+ HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
+
+ String expectedEndpoint = "/index/type/_api";
+ URI uri;
+ if (randomBoolean()) {
+ uri = new URI(expectedEndpoint);
+ } else {
+ uri = new URI("index/type/_api");
+ }
+
+ HttpRequestBase request;
+ int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);
+ switch(requestType) {
+ case 0:
+ request = new HttpGetWithEntity(uri);
+ break;
+ case 1:
+ request = new HttpPost(uri);
+ break;
+ case 2:
+ request = new HttpPut(uri);
+ break;
+ case 3:
+ request = new HttpDeleteWithEntity(uri);
+ break;
+ case 4:
+ request = new HttpHead(uri);
+ break;
+ case 5:
+ request = new HttpTrace(uri);
+ break;
+ case 6:
+ request = new HttpOptions(uri);
+ break;
+ case 7:
+ request = new HttpPatch(uri);
+ break;
+ default:
+ throw new UnsupportedOperationException();
+ }
+
+ String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
+ boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
+ String requestBody = "{ \"field\": \"value\" }";
+ if (hasBody) {
+ expected += " -d '" + requestBody + "'";
+ HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
+ HttpEntity entity;
+ if (getRandom().nextBoolean()) {
+ entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
+ } else {
+ entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
+ }
+ enclosingRequest.setEntity(entity);
+ }
+
+ String traceRequest = RequestLogger.buildTraceRequest(request, host);
+ assertThat(traceRequest, equalTo(expected));
+ if (hasBody) {
+ //check that the body is still readable as most entities are not repeatable
+ String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8);
+ assertThat(body, equalTo(requestBody));
+ }
+ }
+
+ public void testTraceResponse() throws IOException {
+ ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
+ int statusCode = RandomInts.randomIntBetween(getRandom(), 200, 599);
+ String reasonPhrase = "REASON";
+ BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase);
+ String expected = "# " + statusLine.toString();
+ BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine);
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
+ for (int i = 0; i < numHeaders; i++) {
+ httpResponse.setHeader("header" + i, "value");
+ expected += "\n# header" + i + ": value";
+ }
+ expected += "\n#";
+ boolean hasBody = getRandom().nextBoolean();
+ String responseBody = "{\n \"field\": \"value\"\n}";
+ if (hasBody) {
+ expected += "\n# {";
+ expected += "\n# \"field\": \"value\"";
+ expected += "\n# }";
+ HttpEntity entity;
+ if (getRandom().nextBoolean()) {
+ entity = new StringEntity(responseBody, StandardCharsets.UTF_8);
+ } else {
+ entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8)));
+ }
+ httpResponse.setEntity(entity);
+ }
+ String traceResponse = RequestLogger.buildTraceResponse(httpResponse);
+ assertThat(traceResponse, equalTo(expected));
+ if (hasBody) {
+ //check that the body is still readable as most entities are not repeatable
+ String body = EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8);
+ assertThat(body, equalTo(responseBody));
+ }
+ }
+}
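Note that the curl-format traces exercised by these tests are only emitted when the "tracer" logger is enabled. The client logs through commons-logging, so the concrete backend is an assumption; with log4j 1.2 on the classpath it could be switched on programmatically:

    //assumes org.apache.log4j.Logger and org.apache.log4j.Level are available
    Logger.getLogger("tracer").setLevel(Level.TRACE);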
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
new file mode 100644
index 00000000000..a16e961fd28
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.impl.client.HttpClientBuilder;
+import org.apache.http.message.BasicHeader;
+
+import java.io.IOException;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class RestClientBuilderTests extends RestClientTestCase {
+
+ public void testBuild() throws IOException {
+ try {
+ RestClient.builder((HttpHost[])null);
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+ assertEquals("no hosts provided", e.getMessage());
+ }
+
+ try {
+ RestClient.builder();
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+ assertEquals("no hosts provided", e.getMessage());
+ }
+
+ try {
+ RestClient.builder(new HttpHost[]{new HttpHost("localhost", 9200), null}).build();
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("host cannot be null", e.getMessage());
+ }
+
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200))
+ .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+ assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage());
+ }
+
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("default headers must not be null", e.getMessage());
+ }
+
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(new Header[]{null});
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("default header must not be null", e.getMessage());
+ }
+
+ try {
+ RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("failure listener must not be null", e.getMessage());
+ }
+
+ int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ HttpHost[] hosts = new HttpHost[numNodes];
+ for (int i = 0; i < numNodes; i++) {
+ hosts[i] = new HttpHost("localhost", 9200 + i);
+ }
+ RestClient.Builder builder = RestClient.builder(hosts);
+ if (getRandom().nextBoolean()) {
+ builder.setHttpClient(HttpClientBuilder.create().build());
+ }
+ if (getRandom().nextBoolean()) {
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ Header[] headers = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
+ headers[i] = new BasicHeader("header" + i, "value");
+ }
+ builder.setDefaultHeaders(headers);
+ }
+ if (getRandom().nextBoolean()) {
+ builder.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+ }
+ try (RestClient restClient = builder.build()) {
+ assertNotNull(restClient);
+ }
+ }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
new file mode 100644
index 00000000000..4a14c174353
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientIntegTests.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.sun.net.httpserver.Headers;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.Consts;
+import org.apache.http.Header;
+import org.apache.http.HttpHost;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.util.EntityUtils;
+import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
+import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
+import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
+ * Works against a real http server, one single host.
+ */
+//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+@IgnoreJRERequirement
+public class RestClientIntegTests extends RestClientTestCase {
+
+ private static HttpServer httpServer;
+ private static RestClient restClient;
+ private static Header[] defaultHeaders;
+
+ @BeforeClass
+ public static void startHttpServer() throws Exception {
+ httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+ httpServer.start();
+ //returns a different status code depending on the path
+ for (int statusCode : getAllStatusCodes()) {
+ createStatusCodeContext(httpServer, statusCode);
+ }
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
+ defaultHeaders = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
+ String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
+ String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ defaultHeaders[i] = new BasicHeader(headerName, headerValue);
+ }
+ restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()))
+ .setDefaultHeaders(defaultHeaders).build();
+ }
+
+ private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) {
+ httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode));
+ }
+
+ //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+ @IgnoreJRERequirement
+ private static class ResponseHandler implements HttpHandler {
+ private final int statusCode;
+
+ ResponseHandler(int statusCode) {
+ this.statusCode = statusCode;
+ }
+
+ @Override
+ public void handle(HttpExchange httpExchange) throws IOException {
+ StringBuilder body = new StringBuilder();
+ try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) {
+ char[] buffer = new char[256];
+ int read;
+ while ((read = reader.read(buffer)) != -1) {
+ body.append(buffer, 0, read);
+ }
+ }
+ Headers requestHeaders = httpExchange.getRequestHeaders();
+ Headers responseHeaders = httpExchange.getResponseHeaders();
+ for (Map.Entry<String, List<String>> header : requestHeaders.entrySet()) {
+ responseHeaders.put(header.getKey(), header.getValue());
+ }
+ httpExchange.getRequestBody().close();
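+ //a response length of -1 tells the http server that there is no response body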
+ httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? -1 : body.length());
+ if (body.length() > 0) {
+ try (OutputStream out = httpExchange.getResponseBody()) {
+ out.write(body.toString().getBytes(Consts.UTF_8));
+ }
+ }
+ httpExchange.close();
+ }
+ }
+
+ @AfterClass
+ public static void stopHttpServers() throws IOException {
+ restClient.close();
+ restClient = null;
+ httpServer.stop(0);
+ httpServer = null;
+ }
+
+ /**
+ * End to end test for headers. We test it explicitly against a real http client as there are different ways
+ * to set/add headers to the {@link org.apache.http.client.HttpClient}.
+ * Exercises the test http server's ability to send back whatever headers it received.
+ */
+ public void testHeaders() throws Exception {
+ for (String method : getHttpMethods()) {
+ Set<String> standardHeaders = new HashSet<>(
+ Arrays.asList("Accept-encoding", "Connection", "Host", "User-agent", "Date"));
+ if (method.equals("HEAD") == false) {
+ standardHeaders.add("Content-length");
+ }
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ Map<String, String> expectedHeaders = new HashMap<>();
+ for (Header defaultHeader : defaultHeaders) {
+ expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
+ }
+ Header[] headers = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
+ String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
+ String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ headers[i] = new BasicHeader(headerName, headerValue);
+ expectedHeaders.put(headerName, headerValue);
+ }
+
+ int statusCode = randomStatusCode(getRandom());
+ Response esResponse;
+ try (Response response = restClient.performRequest(method, "/" + statusCode,
+ Collections.<String, String>emptyMap(), null, headers)) {
+ esResponse = response;
+ } catch(ResponseException e) {
+ esResponse = e.getResponse();
+ }
+ assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
+ for (Header responseHeader : esResponse.getHeaders()) {
+ if (responseHeader.getName().startsWith("Header")) {
+ String headerValue = expectedHeaders.remove(responseHeader.getName());
+ assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+ } else {
+ assertTrue("unknown header was returned " + responseHeader.getName(),
+ standardHeaders.remove(responseHeader.getName()));
+ }
+ }
+ assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size());
+ assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size());
+ }
+ }
+
+ /**
+ * End to end test for delete with body. We test it explicitly as it is not supported
+ * out of the box by {@link org.apache.http.client.HttpClient}.
+ * Exercises the test http server's ability to send back whatever body it received.
+ */
+ public void testDeleteWithBody() throws Exception {
+ bodyTest("DELETE");
+ }
+
+ /**
+ * End to end test for get with body. We test it explicitly as it is not supported
+ * out of the box by {@link org.apache.http.client.HttpClient}.
+ * Exercises the test http server's ability to send back whatever body it received.
+ */
+ public void testGetWithBody() throws Exception {
+ bodyTest("GET");
+ }
+
+ private void bodyTest(String method) throws Exception {
+ String requestBody = "{ \"field\": \"value\" }";
+ StringEntity entity = new StringEntity(requestBody);
+ Response esResponse;
+ String responseBody;
+ int statusCode = randomStatusCode(getRandom());
+ try (Response response = restClient.performRequest(method, "/" + statusCode,
+ Collections.<String, String>emptyMap(), entity)) {
+ responseBody = EntityUtils.toString(response.getEntity());
+ esResponse = response;
+ } catch(ResponseException e) {
+ responseBody = e.getResponseBody();
+ esResponse = e.getResponse();
+ }
+ assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
+ assertEquals(requestBody, responseBody);
+ }
+}
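Outside of the test, the same get/delete-with-body capability can be sketched like this (endpoint and body are illustrative; restClient is assumed to exist):

    HttpEntity searchBody = new StringEntity("{ \"query\": { \"match_all\": {} } }", StandardCharsets.UTF_8);
    try (Response response = restClient.performRequest("GET", "/index/_search",
            Collections.<String, String>emptyMap(), searchBody)) {
        System.out.println(EntityUtils.toString(response.getEntity()));
    }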
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
new file mode 100644
index 00000000000..5a43a8d4d9e
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpRequest;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.conn.ConnectTimeoutException;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.message.BasicStatusLine;
+import org.junit.Before;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
+import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode;
+import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod;
+import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blacklisting etc.
+ * Relies on a mock http client to intercept requests and return desired responses based on request path.
+ */
+public class RestClientMultipleHostsTests extends RestClientTestCase {
+
+ private RestClient restClient;
+ private HttpHost[] httpHosts;
+ private TrackingFailureListener failureListener;
+
+ @Before
+ public void createRestClient() throws IOException {
+ CloseableHttpClient httpClient = mock(CloseableHttpClient.class);
+ when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() {
+ @Override
+ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
+ HttpHost httpHost = (HttpHost) invocationOnMock.getArguments()[0];
+ HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1];
+ //return the desired status code or exception depending on the path
+ if (request.getURI().getPath().equals("/soe")) {
+ throw new SocketTimeoutException(httpHost.toString());
+ } else if (request.getURI().getPath().equals("/coe")) {
+ throw new ConnectTimeoutException(httpHost.toString());
+ } else if (request.getURI().getPath().equals("/ioe")) {
+ throw new IOException(httpHost.toString());
+ }
+ int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
+ StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
+ return new CloseableBasicHttpResponse(statusLine);
+ }
+ });
+
+ int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5);
+ httpHosts = new HttpHost[numHosts];
+ for (int i = 0; i < numHosts; i++) {
+ httpHosts[i] = new HttpHost("localhost", 9200 + i);
+ }
+ failureListener = new TrackingFailureListener();
+ restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build();
+ }
+
+ public void testRoundRobinOkStatusCodes() throws Exception {
+ int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ for (int i = 0; i < numIters; i++) {
+ Set<HttpHost> hostsSet = new HashSet<>();
+ Collections.addAll(hostsSet, httpHosts);
+ for (int j = 0; j < httpHosts.length; j++) {
+ int statusCode = randomOkStatusCode(getRandom());
+ try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+ assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
+ }
+ }
+ assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
+ }
+ failureListener.assertNotCalled();
+ }
+
+ public void testRoundRobinNoRetryErrors() throws Exception {
+ int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ for (int i = 0; i < numIters; i++) {
+ Set<HttpHost> hostsSet = new HashSet<>();
+ Collections.addAll(hostsSet, httpHosts);
+ for (int j = 0; j < httpHosts.length; j++) {
+ String method = randomHttpMethod(getRandom());
+ int statusCode = randomErrorNoRetryStatusCode(getRandom());
+ try (Response response = restClient.performRequest(method, "/" + statusCode)) {
+ if (method.equals("HEAD") && statusCode == 404) {
+ //no exception gets thrown although we got a 404
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(404));
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+ assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
+ } else {
+ fail("request should have failed");
+ }
+ } catch(ResponseException e) {
+ if (method.equals("HEAD") && statusCode == 404) {
+ throw e;
+ }
+ Response response = e.getResponse();
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+ assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
+ assertEquals(0, e.getSuppressed().length);
+ }
+ }
+ assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
+ }
+ failureListener.assertNotCalled();
+ }
+
+ public void testRoundRobinRetryErrors() throws Exception {
+ String retryEndpoint = randomErrorRetryEndpoint();
+ try {
+ restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
+ fail("request should have failed");
+ } catch(ResponseException e) {
+ Set<HttpHost> hostsSet = new HashSet<>();
+ Collections.addAll(hostsSet, httpHosts);
+ //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
+ failureListener.assertCalled(httpHosts);
+ do {
+ Response response = e.getResponse();
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
+ assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times",
+ hostsSet.remove(response.getHost()));
+ if (e.getSuppressed().length > 0) {
+ assertEquals(1, e.getSuppressed().length);
+ Throwable suppressed = e.getSuppressed()[0];
+ assertThat(suppressed, instanceOf(ResponseException.class));
+ e = (ResponseException)suppressed;
+ } else {
+ e = null;
+ }
+ } while(e != null);
+ assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
+ } catch(IOException e) {
+ Set<HttpHost> hostsSet = new HashSet<>();
+ Collections.addAll(hostsSet, httpHosts);
+ //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each
+ failureListener.assertCalled(httpHosts);
+ do {
+ HttpHost httpHost = HttpHost.create(e.getMessage());
+ assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
+ if (e.getSuppressed().length > 0) {
+ assertEquals(1, e.getSuppressed().length);
+ Throwable suppressed = e.getSuppressed()[0];
+ assertThat(suppressed, instanceOf(IOException.class));
+ e = (IOException) suppressed;
+ } else {
+ e = null;
+ }
+ } while(e != null);
+ assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
+ }
+
+ int numIters = RandomInts.randomIntBetween(getRandom(), 2, 5);
+ for (int i = 1; i <= numIters; i++) {
+ //check that one different host is resurrected at each new attempt
+ Set<HttpHost> hostsSet = new HashSet<>();
+ Collections.addAll(hostsSet, httpHosts);
+ for (int j = 0; j < httpHosts.length; j++) {
+ retryEndpoint = randomErrorRetryEndpoint();
+ try {
+ restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
+ fail("request should have failed");
+ } catch(ResponseException e) {
+ Response response = e.getResponse();
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
+ assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times",
+ hostsSet.remove(response.getHost()));
+ //after the first request, all hosts are blacklisted, a single one gets resurrected each time
+ failureListener.assertCalled(response.getHost());
+ assertEquals(0, e.getSuppressed().length);
+ } catch(IOException e) {
+ HttpHost httpHost = HttpHost.create(e.getMessage());
+ assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost));
+ //after the first request, all hosts are blacklisted, a single one gets resurrected each time
+ failureListener.assertCalled(httpHost);
+ assertEquals(0, e.getSuppressed().length);
+ }
+ }
+ assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
+ if (getRandom().nextBoolean()) {
+ //mark one host back alive through a successful request and check that all requests after that are sent to it
+ HttpHost selectedHost = null;
+ int iters = RandomInts.randomIntBetween(getRandom(), 2, 10);
+ for (int y = 0; y < iters; y++) {
+ int statusCode = randomErrorNoRetryStatusCode(getRandom());
+ Response response;
+ try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
+ response = esResponse;
+ } catch(ResponseException e) {
+ response = e.getResponse();
+ }
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+ if (selectedHost == null) {
+ selectedHost = response.getHost();
+ } else {
+ assertThat(response.getHost(), equalTo(selectedHost));
+ }
+ }
+ failureListener.assertNotCalled();
+ //let the selected host catch up on number of failures, it gets selected a consecutive number of times as it's the one
+ //selected to be retried earlier (due to lower number of failures) till all the hosts have the same number of failures
+ for (int y = 0; y < i + 1; y++) {
+ retryEndpoint = randomErrorRetryEndpoint();
+ try {
+ restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
+ fail("request should have failed");
+ } catch(ResponseException e) {
+ Response response = e.getResponse();
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
+ assertThat(response.getHost(), equalTo(selectedHost));
+ failureListener.assertCalled(selectedHost);
+ } catch(IOException e) {
+ HttpHost httpHost = HttpHost.create(e.getMessage());
+ assertThat(httpHost, equalTo(selectedHost));
+ failureListener.assertCalled(selectedHost);
+ }
+ }
+ }
+ }
+ }
+
+ private static String randomErrorRetryEndpoint() {
+ switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) {
+ case 0:
+ return "/" + randomErrorRetryStatusCode(getRandom());
+ case 1:
+ return "/coe";
+ case 2:
+ return "/soe";
+ case 3:
+ return "/ioe";
+ }
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
new file mode 100644
index 00000000000..b250614b91b
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java
@@ -0,0 +1,450 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import org.apache.http.Header;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpEntityEnclosingRequest;
+import org.apache.http.HttpHost;
+import org.apache.http.HttpRequest;
+import org.apache.http.ProtocolVersion;
+import org.apache.http.StatusLine;
+import org.apache.http.client.methods.CloseableHttpResponse;
+import org.apache.http.client.methods.HttpHead;
+import org.apache.http.client.methods.HttpOptions;
+import org.apache.http.client.methods.HttpPatch;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.apache.http.client.methods.HttpTrace;
+import org.apache.http.client.methods.HttpUriRequest;
+import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.conn.ConnectTimeoutException;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.message.BasicHeader;
+import org.apache.http.message.BasicStatusLine;
+import org.apache.http.util.EntityUtils;
+import org.junit.Before;
+import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
+import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
+import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes;
+import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod;
+import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for basic functionality of {@link RestClient} against a single host: verifies the http requests that get sent,
+ * their headers and body, and the responses or exceptions that the different status codes translate into.
+ * Relies on a mock http client that intercepts requests and returns the desired response based on the request path.
+ */
+public class RestClientSingleHostTests extends RestClientTestCase {
+
+ private RestClient restClient;
+ private Header[] defaultHeaders;
+ private HttpHost httpHost;
+ private CloseableHttpClient httpClient;
+ private TrackingFailureListener failureListener;
+
+ @Before
+ public void createRestClient() throws IOException {
+ httpClient = mock(CloseableHttpClient.class);
+ when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() {
+ @Override
+ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
+ HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1];
+ //return the desired status code or exception depending on the path
+ if (request.getURI().getPath().equals("/soe")) {
+ throw new SocketTimeoutException();
+ } else if (request.getURI().getPath().equals("/coe")) {
+ throw new ConnectTimeoutException();
+ }
+ int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
+ StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
+
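+ //CloseableBasicHttpResponse is a helper from this same test package (hence no import) that provides a closeable BasicHttpResponse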
+ CloseableHttpResponse httpResponse = new CloseableBasicHttpResponse(statusLine);
+ //return the same body that was sent
+ if (request instanceof HttpEntityEnclosingRequest) {
+ HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
+ if (entity != null) {
+ assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable());
+ httpResponse.setEntity(entity);
+ }
+ }
+ //return the same headers that were sent
+ httpResponse.setHeaders(request.getAllHeaders());
+ return httpResponse;
+ }
+ });
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
+ defaultHeaders = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
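+ //randomly reuse the same header name across iterations to cover name collisions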
+ String headerName = "Header-default" + (getRandom().nextBoolean() ? i : "");
+ String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ defaultHeaders[i] = new BasicHeader(headerName, headerValue);
+ }
+ httpHost = new HttpHost("localhost", 9200);
+ failureListener = new TrackingFailureListener();
+ restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders)
+ .setFailureListener(failureListener).build();
+ }
+
+ /**
+ * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client
+ */
+ public void testInternalHttpRequest() throws Exception {
+ ArgumentCaptor<HttpUriRequest> requestArgumentCaptor = ArgumentCaptor.forClass(HttpUriRequest.class);
+ int times = 0;
+ for (String httpMethod : getHttpMethods()) {
+ HttpUriRequest expectedRequest = performRandomRequest(httpMethod);
+ verify(httpClient, times(++times)).execute(any(HttpHost.class), requestArgumentCaptor.capture());
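+ //the captor accumulates one request per execute() call; getValue() returns the most recently captured one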
+ HttpUriRequest actualRequest = requestArgumentCaptor.getValue();
+ assertEquals(expectedRequest.getURI(), actualRequest.getURI());
+ assertEquals(expectedRequest.getClass(), actualRequest.getClass());
+ assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders());
+ if (expectedRequest instanceof HttpEntityEnclosingRequest) {
+ HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity();
+ if (expectedEntity != null) {
+ HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity();
+ assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity));
+ }
+ }
+ }
+ }
+
+ public void testSetHosts() throws IOException {
+ try {
+ restClient.setHosts((HttpHost[]) null);
+ fail("setHosts should have failed");
+ } catch (IllegalArgumentException e) {
+ assertEquals("hosts must not be null nor empty", e.getMessage());
+ }
+ try {
+ restClient.setHosts();
+ fail("setHosts should have failed");
+ } catch (IllegalArgumentException e) {
+ assertEquals("hosts must not be null nor empty", e.getMessage());
+ }
+ try {
+ restClient.setHosts((HttpHost) null);
+ fail("setHosts should have failed");
+ } catch (NullPointerException e) {
+ assertEquals("host cannot be null", e.getMessage());
+ }
+ try {
+ restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201));
+ fail("setHosts should have failed");
+ } catch (NullPointerException e) {
+ assertEquals("host cannot be null", e.getMessage());
+ }
+ }
+
+ /**
+ * End to end test for ok status codes
+ */
+ public void testOkStatusCodes() throws Exception {
+ for (String method : getHttpMethods()) {
+ for (int okStatusCode : getOkStatusCodes()) {
+ Response response = performRequest(method, "/" + okStatusCode);
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
+ }
+ }
+ failureListener.assertNotCalled();
+ }
+
+ /**
+ * End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests
+ */
+ public void testErrorStatusCodes() throws Exception {
+ for (String method : getHttpMethods()) {
+ //error status codes should cause an exception to be thrown
+ for (int errorStatusCode : getAllErrorStatusCodes()) {
+ try (Response response = performRequest(method, "/" + errorStatusCode)) {
+ if (method.equals("HEAD") && errorStatusCode == 404) {
+ //no exception gets thrown although we got a 404
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
+ } else {
+ fail("request should have failed");
+ }
+ } catch(ResponseException e) {
+ if (method.equals("HEAD") && errorStatusCode == 404) {
+ throw e;
+ }
+ assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode));
+ }
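+ //the failure listener gets notified only for status codes above 500, which the client treats as node failures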
+ if (errorStatusCode <= 500) {
+ failureListener.assertNotCalled();
+ } else {
+ failureListener.assertCalled(httpHost);
+ }
+ }
+ }
+ }
+
+ public void testIOExceptions() throws IOException {
+ for (String method : getHttpMethods()) {
+ //IOExceptions should bubble up to the caller unchanged
+ try {
+ performRequest(method, "/coe");
+ fail("request should have failed");
+ } catch(IOException e) {
+ assertThat(e, instanceOf(ConnectTimeoutException.class));
+ }
+ failureListener.assertCalled(httpHost);
+ try {
+ performRequest(method, "/soe");
+ fail("request should have failed");
+ } catch(IOException e) {
+ assertThat(e, instanceOf(SocketTimeoutException.class));
+ }
+ failureListener.assertCalled(httpHost);
+ }
+ }
+
+ /**
+ * End to end test for request and response body. Exercises the mock http client's ability to send back
+ * whatever body it has received.
+ */
+ public void testBody() throws Exception {
+ String body = "{ \"field\": \"value\" }";
+ StringEntity entity = new StringEntity(body);
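+ //a repeatable entity, which the mock client relies on to echo the body back in the response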
+ for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
+ for (int okStatusCode : getOkStatusCodes()) {
+ try (Response response = restClient.performRequest(method, "/" + okStatusCode,
+ Collections.<String, String>emptyMap(), entity)) {
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
+ assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
+ }
+ }
+ for (int errorStatusCode : getAllErrorStatusCodes()) {
+ try {
+ restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), entity);
+ fail("request should have failed");
+ } catch(ResponseException e) {
+ Response response = e.getResponse();
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
+ assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
+ }
+ }
+ }
+ for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
+ try {
+ restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.emptyMap(), entity);
+ fail("request should have failed");
+ } catch(UnsupportedOperationException e) {
+ assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
+ }
+ }
+ }
+
+ public void testNullHeaders() throws Exception {
+ String method = randomHttpMethod(getRandom());
+ int statusCode = randomStatusCode(getRandom());
+ try {
+ performRequest(method, "/" + statusCode, (Header[])null);
+ fail("request should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("request headers must not be null", e.getMessage());
+ }
+ try {
+ performRequest(method, "/" + statusCode, (Header)null);
+ fail("request should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("request header must not be null", e.getMessage());
+ }
+ }
+
+ public void testNullParams() throws Exception {
+ String method = randomHttpMethod(getRandom());
+ int statusCode = randomStatusCode(getRandom());
+ try {
+ restClient.performRequest(method, "/" + statusCode, (Map)null);
+ fail("request should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("params must not be null", e.getMessage());
+ }
+ try {
+ restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
+ fail("request should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("params must not be null", e.getMessage());
+ }
+ }
+
+ /**
+ * End to end test for request and response headers. Exercises the mock http client's ability to send back
+ * whatever headers it has received.
+ */
+ public void testHeaders() throws Exception {
+ for (String method : getHttpMethods()) {
+ Map<String, String> expectedHeaders = new HashMap<>();
+ for (Header defaultHeader : defaultHeaders) {
+ expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue());
+ }
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ Header[] headers = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
+ String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
+ String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ headers[i] = new BasicHeader(headerName, headerValue);
+ expectedHeaders.put(headerName, headerValue);
+ }
+
+ int statusCode = randomStatusCode(getRandom());
+ Response esResponse;
+ try (Response response = restClient.performRequest(method, "/" + statusCode,
+ Collections.<String, String>emptyMap(), null, headers)) {
+ esResponse = response;
+ } catch(ResponseException e) {
+ esResponse = e.getResponse();
+ }
+ assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode));
+ for (Header responseHeader : esResponse.getHeaders()) {
+ String headerValue = expectedHeaders.remove(responseHeader.getName());
+ assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue);
+ }
+ assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size());
+ }
+ }
+
+ private HttpUriRequest performRandomRequest(String method) throws IOException, URISyntaxException {
+ String uriAsString = "/" + randomStatusCode(getRandom());
+ URIBuilder uriBuilder = new URIBuilder(uriAsString);
+ Map<String, String> params = Collections.emptyMap();
+ boolean hasParams = randomBoolean();
+ if (hasParams) {
+ int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
+ params = new HashMap<>(numParams);
+ for (int i = 0; i < numParams; i++) {
+ String paramKey = "param-" + i;
+ String paramValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ params.put(paramKey, paramValue);
+ uriBuilder.addParameter(paramKey, paramValue);
+ }
+ }
+ URI uri = uriBuilder.build();
+
+ HttpUriRequest request;
+ switch(method) {
+ case "DELETE":
+ request = new HttpDeleteWithEntity(uri);
+ break;
+ case "GET":
+ request = new HttpGetWithEntity(uri);
+ break;
+ case "HEAD":
+ request = new HttpHead(uri);
+ break;
+ case "OPTIONS":
+ request = new HttpOptions(uri);
+ break;
+ case "PATCH":
+ request = new HttpPatch(uri);
+ break;
+ case "POST":
+ request = new HttpPost(uri);
+ break;
+ case "PUT":
+ request = new HttpPut(uri);
+ break;
+ case "TRACE":
+ request = new HttpTrace(uri);
+ break;
+ default:
+ throw new UnsupportedOperationException("method not supported: " + method);
+ }
+
+ HttpEntity entity = null;
+ boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
+ if (hasBody) {
+ entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
+ ((HttpEntityEnclosingRequest) request).setEntity(entity);
+ }
+
+ Header[] headers = new Header[0];
+ for (Header defaultHeader : defaultHeaders) {
+ //default headers are not passed in explicitly: the client adds them itself, so they are still expected on the request
+ request.setHeader(defaultHeader);
+ }
+ if (getRandom().nextBoolean()) {
+ int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ headers = new Header[numHeaders];
+ for (int i = 0; i < numHeaders; i++) {
+ String headerName = "Header" + (getRandom().nextBoolean() ? i : "");
+ String headerValue = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 3, 10);
+ BasicHeader basicHeader = new BasicHeader(headerName, headerValue);
+ headers[i] = basicHeader;
+ request.setHeader(basicHeader);
+ }
+ }
+
+ try {
+ if (hasParams == false && hasBody == false && randomBoolean()) {
+ restClient.performRequest(method, uriAsString, headers);
+ } else if (hasBody == false && randomBoolean()) {
+ restClient.performRequest(method, uriAsString, params, headers);
+ } else {
+ restClient.performRequest(method, uriAsString, params, entity, headers);
+ }
+ } catch(ResponseException e) {
+ //all good
+ }
+ return request;
+ }
+
+ private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
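+ //randomly exercise each of the performRequest overloads that accept headers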
+ switch(randomIntBetween(0, 2)) {
+ case 0:
+ return restClient.performRequest(method, endpoint, headers);
+ case 1:
+ return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
+ case 2:
+ return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
+ default:
+ throw new UnsupportedOperationException();
+ }
+ }
+}
diff --git a/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java b/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java
new file mode 100644
index 00000000000..35842823923
--- /dev/null
+++ b/client/rest/src/test/java/org/elasticsearch/client/TrackingFailureListener.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.HttpHost;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+/**
+ * {@link org.elasticsearch.client.RestClient.FailureListener} implementation that tracks the hosts it gets notified about
+ */
+class TrackingFailureListener extends RestClient.FailureListener {
+ private Set<HttpHost> hosts = new HashSet<>();
+
+ @Override
+ public void onFailure(HttpHost host) throws IOException {
+ hosts.add(host);
+ }
+
+ void assertCalled(HttpHost... hosts) {
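+ //expect exactly the given hosts, in any order, then reset so subsequent assertions start clean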
+ assertEquals(hosts.length, this.hosts.size());
+ assertThat(this.hosts, containsInAnyOrder(hosts));
+ this.hosts.clear();
+ }
+
+ void assertNotCalled() {
+ assertEquals(0, hosts.size());
+ }
+}
\ No newline at end of file
diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle
new file mode 100644
index 00000000000..7cf16ee85d8
--- /dev/null
+++ b/client/sniffer/build.gradle
@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+import org.gradle.api.JavaVersion
+
+apply plugin: 'elasticsearch.build'
+apply plugin: 'ru.vyarus.animalsniffer'
+
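+//the client is compiled for java 7 so it can run on older runtimes; the animalsniffer plugin, with the java17 signature below, verifies that only java 7 APIs are used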
+targetCompatibility = JavaVersion.VERSION_1_7
+sourceCompatibility = JavaVersion.VERSION_1_7
+
+group = 'org.elasticsearch.client'
+
+dependencies {
+ compile "org.elasticsearch.client:rest:${version}"
+ compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
+ compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+ compile "commons-codec:commons-codec:${versions.commonscodec}"
+ compile "commons-logging:commons-logging:${versions.commonslogging}"
+ compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
+
+ testCompile "org.elasticsearch.client:test:${version}"
+ testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+ testCompile "junit:junit:${versions.junit}"
+ testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
+ testCompile "org.elasticsearch:securemock:${versions.securemock}"
+ testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
+ signature "org.codehaus.mojo.signature:java17:1.0@signature"
+}
+
+forbiddenApisMain {
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+forbiddenApisTest {
+ //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+//JarHell is part of es core, which we don't want to pull in
+jarHell.enabled=false
+
+namingConventions {
+ testClass = 'org.elasticsearch.client.RestClientTestCase'
+ //we don't have integration tests
+ skipIntegTestInDisguise = true
+}
+
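+//check licenses only for third-party dependencies; the elasticsearch client artifacts are filtered out below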
+dependencyLicenses {
+ dependencies = project.configurations.runtime.fileCollection {
+ it.group.startsWith('org.elasticsearch') == false
+ }
+}
+
+thirdPartyAudit.excludes = [
+ //commons-logging optional dependencies
+ 'org.apache.avalon.framework.logger.Logger',
+ 'org.apache.log.Hierarchy',
+ 'org.apache.log.Logger',
+ 'org.apache.log4j.Category',
+ 'org.apache.log4j.Level',
+ 'org.apache.log4j.Logger',
+ 'org.apache.log4j.Priority',
+ //commons-logging provided dependencies
+ 'javax.servlet.ServletContextEvent',
+ 'javax.servlet.ServletContextListener'
+]
diff --git a/client/sniffer/licenses/commons-codec-1.10.jar.sha1 b/client/sniffer/licenses/commons-codec-1.10.jar.sha1
new file mode 100644
index 00000000000..3fe8682a1b0
--- /dev/null
+++ b/client/sniffer/licenses/commons-codec-1.10.jar.sha1
@@ -0,0 +1 @@
+4b95f4897fa13f2cd904aee711aeafc0c5295cd8
\ No newline at end of file
diff --git a/distribution/src/main/resources/LICENSE.txt b/client/sniffer/licenses/commons-codec-LICENSE.txt
similarity index 100%
rename from distribution/src/main/resources/LICENSE.txt
rename to client/sniffer/licenses/commons-codec-LICENSE.txt
diff --git a/client/sniffer/licenses/commons-codec-NOTICE.txt b/client/sniffer/licenses/commons-codec-NOTICE.txt
new file mode 100644
index 00000000000..1da9af50f60
--- /dev/null
+++ b/client/sniffer/licenses/commons-codec-NOTICE.txt
@@ -0,0 +1,17 @@
+Apache Commons Codec
+Copyright 2002-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
+src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
+contains test data from http://aspell.net/test/orig/batch0.tab.
+Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)
+
+===============================================================================
+
+The content of package org.apache.commons.codec.language.bm has been translated
+from the original php source code available at http://stevemorse.org/phoneticinfo.htm
+with permission from the original authors.
+Original source copyright:
+Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
diff --git a/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1 b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1
new file mode 100644
index 00000000000..5b8f029e582
--- /dev/null
+++ b/client/sniffer/licenses/commons-logging-1.1.3.jar.sha1
@@ -0,0 +1 @@
+f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f
\ No newline at end of file
diff --git a/plugins/discovery-azure/LICENSE.txt b/client/sniffer/licenses/commons-logging-LICENSE.txt
similarity index 100%
rename from plugins/discovery-azure/LICENSE.txt
rename to client/sniffer/licenses/commons-logging-LICENSE.txt
diff --git a/client/sniffer/licenses/commons-logging-NOTICE.txt b/client/sniffer/licenses/commons-logging-NOTICE.txt
new file mode 100644
index 00000000000..556bd03951d
--- /dev/null
+++ b/client/sniffer/licenses/commons-logging-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache Commons Logging
+Copyright 2003-2014 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/sniffer/licenses/httpclient-4.5.2.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.2.jar.sha1
new file mode 100644
index 00000000000..6937112a09f
--- /dev/null
+++ b/client/sniffer/licenses/httpclient-4.5.2.jar.sha1
@@ -0,0 +1 @@
+733db77aa8d9b2d68015189df76ab06304406e50
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpclient-LICENSE.txt b/client/sniffer/licenses/httpclient-LICENSE.txt
new file mode 100644
index 00000000000..32f01eda18f
--- /dev/null
+++ b/client/sniffer/licenses/httpclient-LICENSE.txt
@@ -0,0 +1,558 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+=========================================================================
+
+This project includes Public Suffix List copied from
+<https://publicsuffix.org/list/effective_tld_names.dat>
+licensed under the terms of the Mozilla Public License, v. 2.0
+
+Full license text:
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/client/sniffer/licenses/httpclient-NOTICE.txt b/client/sniffer/licenses/httpclient-NOTICE.txt
new file mode 100644
index 00000000000..91e5c40c4c6
--- /dev/null
+++ b/client/sniffer/licenses/httpclient-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache HttpComponents Client
+Copyright 1999-2016 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/sniffer/licenses/httpcore-4.4.4.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.4.jar.sha1
new file mode 100644
index 00000000000..ef0c257e012
--- /dev/null
+++ b/client/sniffer/licenses/httpcore-4.4.4.jar.sha1
@@ -0,0 +1 @@
+b31526a230871fbe285fbcbe2813f9c0839ae9b0
\ No newline at end of file
diff --git a/client/sniffer/licenses/httpcore-LICENSE.txt b/client/sniffer/licenses/httpcore-LICENSE.txt
new file mode 100644
index 00000000000..32f01eda18f
--- /dev/null
+++ b/client/sniffer/licenses/httpcore-LICENSE.txt
@@ -0,0 +1,558 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+=========================================================================
+
+This project includes Public Suffix List copied from
+
+licensed under the terms of the Mozilla Public License, v. 2.0
+
+Full license text:
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/client/sniffer/licenses/httpcore-NOTICE.txt b/client/sniffer/licenses/httpcore-NOTICE.txt
new file mode 100644
index 00000000000..91e5c40c4c6
--- /dev/null
+++ b/client/sniffer/licenses/httpcore-NOTICE.txt
@@ -0,0 +1,6 @@
+Apache HttpComponents Client
+Copyright 1999-2016 The Apache Software Foundation
+
+This product includes software developed at
+The Apache Software Foundation (http://www.apache.org/).
+
diff --git a/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1
new file mode 100644
index 00000000000..73831ed2d51
--- /dev/null
+++ b/client/sniffer/licenses/jackson-core-2.7.1.jar.sha1
@@ -0,0 +1 @@
+4127b62db028f981e81caa248953c0899d720f98
\ No newline at end of file
diff --git a/plugins/discovery-azure/licenses/jackson-LICENSE b/client/sniffer/licenses/jackson-core-LICENSE
similarity index 100%
rename from plugins/discovery-azure/licenses/jackson-LICENSE
rename to client/sniffer/licenses/jackson-core-LICENSE
diff --git a/plugins/discovery-azure/licenses/jackson-NOTICE b/client/sniffer/licenses/jackson-core-NOTICE
similarity index 100%
rename from plugins/discovery-azure/licenses/jackson-NOTICE
rename to client/sniffer/licenses/jackson-core-NOTICE
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java
new file mode 100644
index 00000000000..bfe21f5e7d1
--- /dev/null
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java
@@ -0,0 +1,194 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonParser;
+import com.fasterxml.jackson.core.JsonToken;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpEntity;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.RestClient;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them.
+ * Compatible with elasticsearch 5.x and 2.x.
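+ * <p>
+ * A minimal usage sketch (the host and port below are illustrative assumptions, not defaults):
+ * <pre>{@code
+ * RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
+ * HostsSniffer hostsSniffer = HostsSniffer.builder(restClient).build();
+ * List<HttpHost> hosts = hostsSniffer.sniffHosts();
+ * }</pre>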
+ */
+public class HostsSniffer {
+
+ private static final Log logger = LogFactory.getLog(HostsSniffer.class);
+
+ private final RestClient restClient;
+    private final Map<String, String> sniffRequestParams;
+ private final Scheme scheme;
+ private final JsonFactory jsonFactory = new JsonFactory();
+
+ protected HostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
+ this.restClient = restClient;
+ this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms");
+ this.scheme = scheme;
+ }
+
+ /**
+     * Calls the elasticsearch nodes info api, parses the response and returns all the http hosts found
+ */
+    public List<HttpHost> sniffHosts() throws IOException {
+ try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams)) {
+ return readHosts(response.getEntity());
+ }
+ }
+
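+    /**
+     * Parses a nodes info response of the following (abridged, illustrative) shape, collecting one host per
+     * node that exposes an http publish_address:
+     * <pre>{@code
+     * {"nodes": {"<node_id>": {"http": {"publish_address": "127.0.0.1:9200"}}}}
+     * }</pre>
+     */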
+    private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
+ try (InputStream inputStream = entity.getContent()) {
+ JsonParser parser = jsonFactory.createParser(inputStream);
+ if (parser.nextToken() != JsonToken.START_OBJECT) {
+ throw new IOException("expected data to start with an object");
+ }
+            List<HttpHost> hosts = new ArrayList<>();
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+ if ("nodes".equals(parser.getCurrentName())) {
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ JsonToken token = parser.nextToken();
+ assert token == JsonToken.START_OBJECT;
+ String nodeId = parser.getCurrentName();
+ HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
+ if (sniffedHost != null) {
+ logger.trace("adding node [" + nodeId + "]");
+ hosts.add(sniffedHost);
+ }
+ }
+ } else {
+ parser.skipChildren();
+ }
+ }
+ }
+ return hosts;
+ }
+ }
+
+ private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
+ HttpHost httpHost = null;
+ String fieldName = null;
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
+ fieldName = parser.getCurrentName();
+ } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+ if ("http".equals(fieldName)) {
+ while (parser.nextToken() != JsonToken.END_OBJECT) {
+ if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
+ URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
+ httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
+ boundAddressAsURI.getScheme());
+ } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
+ parser.skipChildren();
+ }
+ }
+ } else {
+ parser.skipChildren();
+ }
+ }
+ }
+ //http section is not present if http is not enabled on the node, ignore such nodes
+ if (httpHost == null) {
+ logger.debug("skipping node [" + nodeId + "] with http disabled");
+ return null;
+ }
+ return httpHost;
+ }
+
+ /**
+ * Returns a new {@link Builder} to help with {@link HostsSniffer} creation.
+ */
+ public static Builder builder(RestClient restClient) {
+ return new Builder(restClient);
+ }
+
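+    /**
+     * Scheme (http or https) to associate the sniffed hosts with, given that the nodes info response does not carry it.
+     */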
+ public enum Scheme {
+ HTTP("http"), HTTPS("https");
+
+ private final String name;
+
+ Scheme(String name) {
+ this.name = name;
+ }
+
+ @Override
+ public String toString() {
+ return name;
+ }
+ }
+
+ /**
+     * HostsSniffer builder. Helps create a new {@link HostsSniffer}.
+ */
+ public static class Builder {
+ public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
+
+ private final RestClient restClient;
+ private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
+ private Scheme scheme = Scheme.HTTP;
+
+ private Builder(RestClient restClient) {
+ Objects.requireNonNull(restClient, "restClient cannot be null");
+ this.restClient = restClient;
+ }
+
+ /**
+ * Sets the sniff request timeout (in milliseconds) to be passed in as a query string parameter to elasticsearch.
+         * Allows the request to be halted without failing, as only the nodes that have responded within this timeout will be returned.
+ */
+ public Builder setSniffRequestTimeoutMillis(int sniffRequestTimeoutMillis) {
+ if (sniffRequestTimeoutMillis <= 0) {
+ throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
+ }
+ this.sniffRequestTimeoutMillis = sniffRequestTimeoutMillis;
+ return this;
+ }
+
+ /**
+ * Sets the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
+ */
+ public Builder setScheme(Scheme scheme) {
+ Objects.requireNonNull(scheme, "scheme cannot be null");
+ this.scheme = scheme;
+ return this;
+ }
+
+ /**
+ * Creates a new {@link HostsSniffer} instance given the provided configuration
+ */
+ public HostsSniffer build() {
+ return new HostsSniffer(restClient, sniffRequestTimeoutMillis, scheme);
+ }
+ }
+}
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
new file mode 100644
index 00000000000..76350057141
--- /dev/null
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+
+import java.io.IOException;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * {@link org.elasticsearch.client.RestClient.FailureListener} implementation that allows sniffing to be
+ * performed on failure. It gets notified whenever a failure happens and uses a {@link Sniffer} instance
+ * to reload the hosts and set them back on the {@link RestClient}. The {@link Sniffer} instance
+ * needs to be lazily set through {@link #setSniffer(Sniffer)}.
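+ * <p>
+ * Typical wiring, sketched (host and port are illustrative; the listener must exist before the client is built):
+ * <pre>{@code
+ * SniffOnFailureListener listener = new SniffOnFailureListener();
+ * RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
+ *         .setFailureListener(listener).build();
+ * Sniffer sniffer = Sniffer.builder(restClient, HostsSniffer.builder(restClient).build()).build();
+ * listener.setSniffer(sniffer);
+ * }</pre>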
+ */
+public class SniffOnFailureListener extends RestClient.FailureListener {
+
+ private volatile Sniffer sniffer;
+ private final AtomicBoolean set;
+
+ public SniffOnFailureListener() {
+ this.set = new AtomicBoolean(false);
+ }
+
+ /**
+ * Sets the {@link Sniffer} instance used to perform sniffing
+ * @throws IllegalStateException if the sniffer was already set, as it can only be set once
+ */
+ public void setSniffer(Sniffer sniffer) {
+ Objects.requireNonNull(sniffer, "sniffer must not be null");
+ if (set.compareAndSet(false, true)) {
+ this.sniffer = sniffer;
+ } else {
+ throw new IllegalStateException("sniffer can only be set once");
+ }
+ }
+
+ @Override
+ public void onFailure(HttpHost host) throws IOException {
+ if (sniffer == null) {
+ throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
+ }
+ //re-sniff immediately but take out the node that failed
+ sniffer.sniffOnFailure(host);
+ }
+}
diff --git a/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
new file mode 100644
index 00000000000..74a28cdd222
--- /dev/null
+++ b/client/sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Class responsible for sniffing nodes from an elasticsearch cluster and setting them on a provided instance of {@link RestClient}.
+ * Must be created via {@link Builder}, which allows setting all of the different options or relying on defaults.
+ * A background task fetches the nodes through the {@link HostsSniffer} and sets them on the {@link RestClient} instance.
+ * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
+ * {@link org.elasticsearch.client.RestClient.Builder#setFailureListener(RestClient.FailureListener)}. The {@link Sniffer} instance
+ * needs to be lazily set on the previously created {@link SniffOnFailureListener} through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
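+ * <p>
+ * A minimal usage sketch (host and port are illustrative assumptions):
+ * <pre>{@code
+ * RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
+ * HostsSniffer hostsSniffer = HostsSniffer.builder(restClient).build();
+ * Sniffer sniffer = Sniffer.builder(restClient, hostsSniffer).build();
+ * // hosts are now refreshed in the background; typically close the sniffer before the client when done
+ * sniffer.close();
+ * restClient.close();
+ * }</pre>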
+ */
+public final class Sniffer implements Closeable {
+
+ private static final Log logger = LogFactory.getLog(Sniffer.class);
+
+ private final Task task;
+
+ private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
+ this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay);
+ }
+
+ /**
+ * Triggers a new sniffing round and explicitly takes out the failed host provided as argument
+ */
+ public void sniffOnFailure(HttpHost failedHost) {
+ this.task.sniffOnFailure(failedHost);
+ }
+
+ @Override
+ public void close() throws IOException {
+ task.shutdown();
+ }
+
+ private static class Task implements Runnable {
+ private final HostsSniffer hostsSniffer;
+ private final RestClient restClient;
+
+ private final long sniffIntervalMillis;
+ private final long sniffAfterFailureDelayMillis;
+ private final ScheduledExecutorService scheduledExecutorService;
+ private final AtomicBoolean running = new AtomicBoolean(false);
+        private ScheduledFuture<?> scheduledFuture;
+
+ private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) {
+ this.hostsSniffer = hostsSniffer;
+ this.restClient = restClient;
+ this.sniffIntervalMillis = sniffIntervalMillis;
+ this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
+ this.scheduledExecutorService = Executors.newScheduledThreadPool(1);
+ scheduleNextRun(0);
+ }
+
+ synchronized void scheduleNextRun(long delayMillis) {
+ if (scheduledExecutorService.isShutdown() == false) {
+ try {
+ if (scheduledFuture != null) {
+ //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay
+ this.scheduledFuture.cancel(false);
+ }
+ logger.debug("scheduling next sniff in " + delayMillis + " ms");
+ this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS);
+ } catch(Exception e) {
+ logger.error("error while scheduling next sniffer task", e);
+ }
+ }
+ }
+
+ @Override
+ public void run() {
+ sniff(null, sniffIntervalMillis);
+ }
+
+ void sniffOnFailure(HttpHost failedHost) {
+ sniff(failedHost, sniffAfterFailureDelayMillis);
+ }
+
+ void sniff(HttpHost excludeHost, long nextSniffDelayMillis) {
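+            //compareAndSet guards against overlapping rounds: a scheduled run and a sniff-on-failure may race, only one proceeds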
+ if (running.compareAndSet(false, true)) {
+ try {
+                    List<HttpHost> sniffedHosts = hostsSniffer.sniffHosts();
+ logger.debug("sniffed hosts: " + sniffedHosts);
+ if (excludeHost != null) {
+ sniffedHosts.remove(excludeHost);
+ }
+ if (sniffedHosts.isEmpty()) {
+ logger.warn("no hosts to set, hosts will be updated at the next sniffing round");
+ } else {
+ this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()]));
+ }
+ } catch (Exception e) {
+ logger.error("error while sniffing nodes", e);
+ } finally {
+ scheduleNextRun(nextSniffDelayMillis);
+ running.set(false);
+ }
+ }
+ }
+
+ synchronized void shutdown() {
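+            //attempt a graceful shutdown first, force-terminate whatever is still running once the grace period expires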
+ scheduledExecutorService.shutdown();
+ try {
+ if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) {
+ return;
+ }
+ scheduledExecutorService.shutdownNow();
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ }
+ }
+ }
+
+ /**
+ * Returns a new {@link Builder} to help with {@link Sniffer} creation.
+ */
+ public static Builder builder(RestClient restClient, HostsSniffer hostsSniffer) {
+ return new Builder(restClient, hostsSniffer);
+ }
+
+ /**
+     * Sniffer builder. Helps create a new {@link Sniffer}.
+ */
+ public static final class Builder {
+ public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5);
+ public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1);
+
+ private final RestClient restClient;
+ private final HostsSniffer hostsSniffer;
+ private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
+ private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
+
+ /**
+ * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch,
+         * and the {@link HostsSniffer} that will be used to fetch the hosts
+ */
+ private Builder(RestClient restClient, HostsSniffer hostsSniffer) {
+ Objects.requireNonNull(restClient, "restClient cannot be null");
+ this.restClient = restClient;
+ Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
+ this.hostsSniffer = hostsSniffer;
+ }
+
+ /**
+ * Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when
+ * sniffOnFailure is disabled or when there are no failures between consecutive sniff executions.
+ * @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0
+ */
+ public Builder setSniffIntervalMillis(int sniffIntervalMillis) {
+ if (sniffIntervalMillis <= 0) {
+ throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0");
+ }
+ this.sniffIntervalMillis = sniffIntervalMillis;
+ return this;
+ }
+
+ /**
+ * Sets the delay of a sniff execution scheduled after a failure (in milliseconds)
+ */
+ public Builder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) {
+ if (sniffAfterFailureDelayMillis <= 0) {
+ throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0");
+ }
+ this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
+ return this;
+ }
+
+ /**
+ * Creates the {@link Sniffer} based on the provided configuration.
+ */
+ public Sniffer build() {
+ return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
+ }
+ }
+}
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java
new file mode 100644
index 00000000000..c167a3a104b
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class HostsSnifferBuilderTests extends RestClientTestCase {
+
+ public void testBuild() throws Exception {
+ try {
+ HostsSniffer.builder(null);
+ fail("should have failed");
+ } catch(NullPointerException e) {
+            assertEquals("restClient cannot be null", e.getMessage());
+ }
+
+ int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ HttpHost[] hosts = new HttpHost[numNodes];
+ for (int i = 0; i < numNodes; i++) {
+ hosts[i] = new HttpHost("localhost", 9200 + i);
+ }
+
+ try (RestClient client = RestClient.builder(hosts).build()) {
+ try {
+ HostsSniffer.builder(client).setScheme(null);
+ fail("should have failed");
+ } catch(NullPointerException e) {
+                assertEquals("scheme cannot be null", e.getMessage());
+ }
+
+ try {
+ HostsSniffer.builder(client).setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+                assertEquals("sniffRequestTimeoutMillis must be greater than 0", e.getMessage());
+ }
+
+ HostsSniffer.Builder builder = HostsSniffer.builder(client);
+ if (getRandom().nextBoolean()) {
+ builder.setScheme(RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values()));
+ }
+ if (getRandom().nextBoolean()) {
+ builder.setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+ }
+ assertNotNull(builder.build());
+ }
+ }
+}
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java
new file mode 100644
index 00000000000..6e0c3a728d5
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java
@@ -0,0 +1,276 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.carrotsearch.randomizedtesting.generators.RandomStrings;
+import com.fasterxml.jackson.core.JsonFactory;
+import com.fasterxml.jackson.core.JsonGenerator;
+import com.sun.net.httpserver.HttpExchange;
+import com.sun.net.httpserver.HttpHandler;
+import com.sun.net.httpserver.HttpServer;
+import org.apache.http.Consts;
+import org.apache.http.HttpHost;
+import org.apache.http.client.methods.HttpGet;
+import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.StringWriter;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.equalTo;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.junit.Assert.fail;
+
+//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+@IgnoreJRERequirement
+public class HostsSnifferTests extends RestClientTestCase {
+
+ private int sniffRequestTimeout;
+ private HostsSniffer.Scheme scheme;
+ private SniffResponse sniffResponse;
+ private HttpServer httpServer;
+
+ @Before
+ public void startHttpServer() throws IOException {
+ this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000);
+ this.scheme = RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values());
+ if (rarely()) {
+ this.sniffResponse = SniffResponse.buildFailure();
+ } else {
+ this.sniffResponse = buildSniffResponse(scheme);
+ }
+ this.httpServer = createHttpServer(sniffResponse, sniffRequestTimeout);
+ this.httpServer.start();
+ }
+
+ @After
+ public void stopHttpServer() throws IOException {
+ httpServer.stop(0);
+ }
+
+ public void testSniffNodes() throws IOException, URISyntaxException {
+ HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
+ try (RestClient restClient = RestClient.builder(httpHost).build()) {
+ HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout);
+ if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) {
+ builder.setScheme(scheme);
+ }
+ HostsSniffer sniffer = builder.build();
+ try {
+                List<HttpHost> sniffedHosts = sniffer.sniffHosts();
+ if (sniffResponse.isFailure) {
+ fail("sniffNodes should have failed");
+ }
+ assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size()));
+                Iterator<HttpHost> responseHostsIterator = sniffResponse.hosts.iterator();
+ for (HttpHost sniffedHost : sniffedHosts) {
+ assertEquals(sniffedHost, responseHostsIterator.next());
+ }
+ } catch(ResponseException e) {
+ Response response = e.getResponse();
+ if (sniffResponse.isFailure) {
+ assertThat(e.getMessage(), containsString("GET " + httpHost + "/_nodes/http?timeout=" + sniffRequestTimeout + "ms"));
+ assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode)));
+ assertThat(response.getHost(), equalTo(httpHost));
+ assertThat(response.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode));
+ assertThat(response.getRequestLine().toString(),
+ equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1"));
+ } else {
+ fail("sniffNodes should have succeeded: " + response.getStatusLine());
+ }
+ }
+ }
+ }
+
+ private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException {
+ HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
+ httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse));
+ return httpServer;
+ }
+
+ //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
+ @IgnoreJRERequirement
+ private static class ResponseHandler implements HttpHandler {
+ private final int sniffTimeoutMillis;
+ private final SniffResponse sniffResponse;
+
+ ResponseHandler(int sniffTimeoutMillis, SniffResponse sniffResponse) {
+ this.sniffTimeoutMillis = sniffTimeoutMillis;
+ this.sniffResponse = sniffResponse;
+ }
+
+ @Override
+ public void handle(HttpExchange httpExchange) throws IOException {
+ if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) {
+ if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeoutMillis + "ms")) {
+ String nodesInfoBody = sniffResponse.nodesInfoBody;
+ httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length());
+ try (OutputStream out = httpExchange.getResponseBody()) {
+ out.write(nodesInfoBody.getBytes(Consts.UTF_8));
+ return;
+ }
+ }
+ }
+ httpExchange.sendResponseHeaders(404, 0);
+ httpExchange.close();
+ }
+ }
+
+ private static SniffResponse buildSniffResponse(HostsSniffer.Scheme scheme) throws IOException {
+ int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+        List<HttpHost> hosts = new ArrayList<>(numNodes);
+ JsonFactory jsonFactory = new JsonFactory();
+ StringWriter writer = new StringWriter();
+ JsonGenerator generator = jsonFactory.createGenerator(writer);
+ generator.writeStartObject();
+ if (getRandom().nextBoolean()) {
+ generator.writeStringField("cluster_name", "elasticsearch");
+ }
+ if (getRandom().nextBoolean()) {
+ generator.writeObjectFieldStart("bogus_object");
+ generator.writeEndObject();
+ }
+ generator.writeObjectFieldStart("nodes");
+ for (int i = 0; i < numNodes; i++) {
+ String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10);
+ generator.writeObjectFieldStart(nodeId);
+ if (getRandom().nextBoolean()) {
+ generator.writeObjectFieldStart("bogus_object");
+ generator.writeEndObject();
+ }
+ if (getRandom().nextBoolean()) {
+ generator.writeArrayFieldStart("bogus_array");
+ generator.writeStartObject();
+ generator.writeEndObject();
+ generator.writeEndArray();
+ }
+ boolean isHttpEnabled = rarely() == false;
+ if (isHttpEnabled) {
+ String host = "host" + i;
+ int port = RandomInts.randomIntBetween(getRandom(), 9200, 9299);
+ HttpHost httpHost = new HttpHost(host, port, scheme.toString());
+ hosts.add(httpHost);
+ generator.writeObjectFieldStart("http");
+ if (getRandom().nextBoolean()) {
+ generator.writeArrayFieldStart("bound_address");
+ generator.writeString("[fe80::1]:" + port);
+ generator.writeString("[::1]:" + port);
+ generator.writeString("127.0.0.1:" + port);
+ generator.writeEndArray();
+ }
+ if (getRandom().nextBoolean()) {
+ generator.writeObjectFieldStart("bogus_object");
+ generator.writeEndObject();
+ }
+ generator.writeStringField("publish_address", httpHost.toHostString());
+ if (getRandom().nextBoolean()) {
+ generator.writeNumberField("max_content_length_in_bytes", 104857600);
+ }
+ generator.writeEndObject();
+ }
+ if (getRandom().nextBoolean()) {
+ String[] roles = {"master", "data", "ingest"};
+ int numRoles = RandomInts.randomIntBetween(getRandom(), 0, 3);
+                Set<String> nodeRoles = new HashSet<>(numRoles);
+ for (int j = 0; j < numRoles; j++) {
+ String role;
+ do {
+ role = RandomPicks.randomFrom(getRandom(), roles);
+ } while(nodeRoles.add(role) == false);
+ }
+ generator.writeArrayFieldStart("roles");
+ for (String nodeRole : nodeRoles) {
+ generator.writeString(nodeRole);
+ }
+ generator.writeEndArray();
+ }
+ int numAttributes = RandomInts.randomIntBetween(getRandom(), 0, 3);
+            Map<String, String> attributes = new HashMap<>(numAttributes);
+ for (int j = 0; j < numAttributes; j++) {
+ attributes.put("attr" + j, "value" + j);
+ }
+ if (numAttributes > 0) {
+ generator.writeObjectFieldStart("attributes");
+ }
+            for (Map.Entry<String, String> entry : attributes.entrySet()) {
+ generator.writeStringField(entry.getKey(), entry.getValue());
+ }
+ if (numAttributes > 0) {
+ generator.writeEndObject();
+ }
+ generator.writeEndObject();
+ }
+ generator.writeEndObject();
+ generator.writeEndObject();
+ generator.close();
+ return SniffResponse.buildResponse(writer.toString(), hosts);
+ }
+
+ private static class SniffResponse {
+ private final String nodesInfoBody;
+ private final int nodesInfoResponseCode;
+        private final List<HttpHost> hosts;
+ private final boolean isFailure;
+
+        SniffResponse(String nodesInfoBody, List<HttpHost> hosts, boolean isFailure) {
+ this.nodesInfoBody = nodesInfoBody;
+ this.hosts = hosts;
+ this.isFailure = isFailure;
+ if (isFailure) {
+ this.nodesInfoResponseCode = randomErrorResponseCode();
+ } else {
+ this.nodesInfoResponseCode = 200;
+ }
+ }
+
+ static SniffResponse buildFailure() {
+ return new SniffResponse("", Collections.emptyList(), true);
+ }
+
+        static SniffResponse buildResponse(String nodesInfoBody, List<HttpHost> hosts) {
+ return new SniffResponse(nodesInfoBody, hosts, false);
+ }
+ }
+
+ private static int randomErrorResponseCode() {
+ return RandomInts.randomIntBetween(getRandom(), 400, 599);
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginsModule.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
similarity index 65%
rename from core/src/main/java/org/elasticsearch/plugins/PluginsModule.java
rename to client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
index 04e468cdd6c..bdc052d07c8 100644
--- a/core/src/main/java/org/elasticsearch/plugins/PluginsModule.java
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java
@@ -17,20 +17,23 @@
* under the License.
*/
-package org.elasticsearch.plugins;
+package org.elasticsearch.client.sniff;
-import org.elasticsearch.common.inject.AbstractModule;
+import org.apache.http.HttpHost;
-public class PluginsModule extends AbstractModule {
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
-    private final PluginsService pluginsService;
-
-    public PluginsModule(PluginsService pluginsService) {
-        this.pluginsService = pluginsService;
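+/**
+ * Mock HostsSniffer implementation used by tests: returns a fixed localhost:9200 host without making any http call.
+ */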
+class MockHostsSniffer extends HostsSniffer {
+    MockHostsSniffer() {
+        super(null, -1, null);
     }
     @Override
-    protected void configure() {
-        bind(PluginsService.class).toInstance(pluginsService);
+    public List<HttpHost> sniffHosts() throws IOException {
+        List<HttpHost> hosts = new ArrayList<>();
+        hosts.add(new HttpHost("localhost", 9200));
+        return hosts;
     }
 }
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java
new file mode 100644
index 00000000000..6a71d72f60e
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class SniffOnFailureListenerTests extends RestClientTestCase {
+
+ public void testSetSniffer() throws Exception {
+ SniffOnFailureListener listener = new SniffOnFailureListener();
+
+ try {
+ listener.onFailure(null);
+ fail("should have failed");
+ } catch(IllegalStateException e) {
+ assertEquals("sniffer was not set, unable to sniff on failure", e.getMessage());
+ }
+
+ try {
+ listener.setSniffer(null);
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("sniffer must not be null", e.getMessage());
+ }
+
+ RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
+ try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) {
+ listener.setSniffer(sniffer);
+ try {
+ listener.setSniffer(sniffer);
+ fail("should have failed");
+ } catch(IllegalStateException e) {
+ assertEquals("sniffer can only be set once", e.getMessage());
+ }
+ listener.onFailure(new HttpHost("localhost", 9200));
+ }
+ }
+}
diff --git a/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java
new file mode 100644
index 00000000000..defa83554a4
--- /dev/null
+++ b/client/sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.sniff;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestClientTestCase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+
+public class SnifferBuilderTests extends RestClientTestCase {
+
+ public void testBuild() throws Exception {
+ int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
+ HttpHost[] hosts = new HttpHost[numNodes];
+ for (int i = 0; i < numNodes; i++) {
+ hosts[i] = new HttpHost("localhost", 9200 + i);
+ }
+
+ HostsSniffer hostsSniffer = new MockHostsSniffer();
+
+ try (RestClient client = RestClient.builder(hosts).build()) {
+ try {
+ Sniffer.builder(null, hostsSniffer).build();
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("restClient cannot be null", e.getMessage());
+ }
+
+ try {
+ Sniffer.builder(client, null).build();
+ fail("should have failed");
+ } catch(NullPointerException e) {
+ assertEquals("hostsSniffer cannot be null", e.getMessage());
+ }
+
+ try {
+ Sniffer.builder(client, hostsSniffer)
+ .setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+ assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage());
+ }
+
+ try {
+ Sniffer.builder(client, hostsSniffer)
+ .setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
+ fail("should have failed");
+ } catch(IllegalArgumentException e) {
+ assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage());
+ }
+
+ try (Sniffer sniffer = Sniffer.builder(client, hostsSniffer).build()) {
+ assertNotNull(sniffer);
+ }
+
+ Sniffer.Builder builder = Sniffer.builder(client, hostsSniffer);
+ if (getRandom().nextBoolean()) {
+ builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+ }
+ if (getRandom().nextBoolean()) {
+ builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
+ }
+ try (Sniffer sniffer = builder.build()) {
+ assertNotNull(sniffer);
+ }
+ }
+ }
+}
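
As the builder tests verify, both timing knobs must be strictly positive, and only the client and HostsSniffer are mandatory. A sketch of a periodically sniffing setup under those constraints (the interval is illustrative; a real deployment would use a production HostsSniffer rather than the mock):

    try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build();
         Sniffer sniffer = Sniffer.builder(client, new MockHostsSniffer())
                 .setSniffIntervalMillis(60000)     // refresh the host list every minute
                 .build()) {
        // the sniffer keeps updating the client's hosts on a background thread until closed
    }
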
diff --git a/client/test/build.gradle b/client/test/build.gradle
new file mode 100644
index 00000000000..05d044504ec
--- /dev/null
+++ b/client/test/build.gradle
@@ -0,0 +1,63 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.gradle.precommit.PrecommitTasks
+import org.gradle.api.JavaVersion
+
+apply plugin: 'elasticsearch.build'
+apply plugin: 'ru.vyarus.animalsniffer'
+
+targetCompatibility = JavaVersion.VERSION_1_7
+sourceCompatibility = JavaVersion.VERSION_1_7
+
+install.enabled = false
+uploadArchives.enabled = false
+
+dependencies {
+ compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+ compile "junit:junit:${versions.junit}"
+ compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
+ compile "org.codehaus.mojo:animal-sniffer-annotations:1.15"
+ signature "org.codehaus.mojo.signature:java17:1.0@signature"
+}
+
+forbiddenApisMain {
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+forbiddenApisTest {
+ //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage
+ bundledSignatures -= 'jdk-non-portable'
+ bundledSignatures += 'jdk-internal'
+ //client does not depend on core, so only jdk signatures should be checked
+ signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
+}
+
+//JarHell is part of es core, which we don't want to pull in
+jarHell.enabled=false
+
+// TODO: should we have licenses for our test deps?
+dependencyLicenses.enabled = false
+
+namingConventions.enabled = false
+
+//we aren't releasing this jar
+thirdPartyAudit.enabled = false
+test.enabled = false
\ No newline at end of file
diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java
new file mode 100644
index 00000000000..8c506beb5ac
--- /dev/null
+++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestCase.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.JUnit3MethodProvider;
+import com.carrotsearch.randomizedtesting.MixWithSuiteName;
+import com.carrotsearch.randomizedtesting.RandomizedTest;
+import com.carrotsearch.randomizedtesting.annotations.SeedDecorators;
+import com.carrotsearch.randomizedtesting.annotations.TestMethodProviders;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakAction;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakGroup;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
+import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
+
+@TestMethodProviders({
+ JUnit3MethodProvider.class
+})
+@SeedDecorators({MixWithSuiteName.class}) // See LUCENE-3995 for rationale.
+@ThreadLeakScope(ThreadLeakScope.Scope.SUITE)
+@ThreadLeakGroup(ThreadLeakGroup.Group.MAIN)
+@ThreadLeakAction({ThreadLeakAction.Action.WARN, ThreadLeakAction.Action.INTERRUPT})
+@ThreadLeakZombies(ThreadLeakZombies.Consequence.IGNORE_REMAINING_TESTS)
+@ThreadLeakLingering(linger = 5000) // 5 sec lingering
+@TimeoutSuite(millis = 2 * 60 * 60 * 1000)
+public abstract class RestClientTestCase extends RandomizedTest {
+
+}
diff --git a/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java
new file mode 100644
index 00000000000..4d4aa00f492
--- /dev/null
+++ b/client/test/src/main/java/org/elasticsearch/client/RestClientTestUtil.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+final class RestClientTestUtil {
+
+ private static final String[] HTTP_METHODS = new String[]{"DELETE", "HEAD", "GET", "OPTIONS", "PATCH", "POST", "PUT", "TRACE"};
+ private static final List<Integer> ALL_STATUS_CODES;
+ private static final List<Integer> OK_STATUS_CODES = Arrays.asList(200, 201);
+ private static final List<Integer> ALL_ERROR_STATUS_CODES;
+ private static List<Integer> ERROR_NO_RETRY_STATUS_CODES = Arrays.asList(400, 401, 403, 404, 405, 500);
+ private static List<Integer> ERROR_RETRY_STATUS_CODES = Arrays.asList(502, 503, 504);
+
+ static {
+ ALL_ERROR_STATUS_CODES = new ArrayList<>(ERROR_RETRY_STATUS_CODES);
+ ALL_ERROR_STATUS_CODES.addAll(ERROR_NO_RETRY_STATUS_CODES);
+ ALL_STATUS_CODES = new ArrayList<>(ALL_ERROR_STATUS_CODES);
+ ALL_STATUS_CODES.addAll(OK_STATUS_CODES);
+ }
+
+ private RestClientTestUtil() {
+
+ }
+
+ static String[] getHttpMethods() {
+ return HTTP_METHODS;
+ }
+
+ static String randomHttpMethod(Random random) {
+ return RandomPicks.randomFrom(random, HTTP_METHODS);
+ }
+
+ static int randomStatusCode(Random random) {
+ return RandomPicks.randomFrom(random, ALL_ERROR_STATUS_CODES);
+ }
+
+ static int randomOkStatusCode(Random random) {
+ return RandomPicks.randomFrom(random, OK_STATUS_CODES);
+ }
+
+ static int randomErrorNoRetryStatusCode(Random random) {
+ return RandomPicks.randomFrom(random, ERROR_NO_RETRY_STATUS_CODES);
+ }
+
+ static int randomErrorRetryStatusCode(Random random) {
+ return RandomPicks.randomFrom(random, ERROR_RETRY_STATUS_CODES);
+ }
+
+ static List<Integer> getOkStatusCodes() {
+ return OK_STATUS_CODES;
+ }
+
+ static List<Integer> getAllErrorStatusCodes() {
+ return ALL_ERROR_STATUS_CODES;
+ }
+
+ static List<Integer> getAllStatusCodes() {
+ return ALL_STATUS_CODES;
+ }
+}
diff --git a/core/README.textile b/core/README.textile
deleted file mode 100644
index daaf5ecb70e..00000000000
--- a/core/README.textile
+++ /dev/null
@@ -1,235 +0,0 @@
-h1. Elasticsearch
-
-h2. A Distributed RESTful Search Engine
-
-h3. "https://www.elastic.co/products/elasticsearch":https://www.elastic.co/products/elasticsearch
-
-Elasticsearch is a distributed RESTful search engine built for the cloud. Features include:
-
-* Distributed and Highly Available Search Engine.
-** Each index is fully sharded with a configurable number of shards.
-** Each shard can have one or more replicas.
-** Read / Search operations performed on either one of the replica shard.
-* Multi Tenant with Multi Types.
-** Support for more than one index.
-** Support for more than one type per index.
-** Index level configuration (number of shards, index storage, ...).
-* Various set of APIs
-** HTTP RESTful API
-** Native Java API.
-** All APIs perform automatic node operation rerouting.
-* Document oriented
-** No need for upfront schema definition.
-** Schema can be defined per type for customization of the indexing process.
-* Reliable, Asynchronous Write Behind for long term persistency.
-* (Near) Real Time Search.
-* Built on top of Lucene
-** Each shard is a fully functional Lucene index
-** All the power of Lucene easily exposed through simple configuration / plugins.
-* Per operation consistency
-** Single document level operations are atomic, consistent, isolated and durable.
-* Open Source under the Apache License, version 2 ("ALv2")
-
-h2. Getting Started
-
-First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.
-
-h3. Requirements
-
-You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
-
-h3. Installation
-
-* "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
-* Run @bin/elasticsearch@ on unix, or @bin\elasticsearch.bat@ on windows.
-* Run @curl -X GET http://localhost:9200/@.
-* Start more servers ...
-
-h3. Indexing
-
-Let's try and index some twitter like information. First, let's create a twitter user, and add some tweets (the @twitter@ index will be created automatically):
-
-
-curl -XPUT 'http://localhost:9200/twitter/user/kimchy' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/1' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
- "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/twitter/tweet/2' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
- "message": "Another tweet, will it be indexed?"
-}'
-
-
-Now, let's see if the information was added by GETting it:
-
-
-curl -XGET 'http://localhost:9200/twitter/user/kimchy?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/1?pretty=true'
-curl -XGET 'http://localhost:9200/twitter/tweet/2?pretty=true'
-
-
-h3. Searching
-
-Mmm search..., shouldn't it be elastic?
-Let's find all the tweets that @kimchy@ posted:
-
-
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?q=user:kimchy&pretty=true'
-
-
-We can also use the JSON query language Elasticsearch provides instead of a query string:
-
-
-curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
-{
- "query" : {
- "match" : { "user": "kimchy" }
- }
-}'
-
-
-Just for kicks, let's get all the documents stored (we should see the user as well):
-
-
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
- "query" : {
- "matchAll" : {}
- }
-}'
-
-
-We can also do range search (the @postDate@ was automatically identified as date)
-
-
-curl -XGET 'http://localhost:9200/twitter/_search?pretty=true' -d '
-{
- "query" : {
- "range" : {
- "postDate" : { "from" : "2009-11-15T13:00:00", "to" : "2009-11-15T14:00:00" }
- }
- }
-}'
-
-
-There are many more options to perform search, after all, it's a search product no? All the familiar Lucene queries are available through the JSON query language, or through the query parser.
-
-h3. Multi Tenant - Indices and Types
-
-Maan, that twitter index might get big (in this case, index size == valuation). Let's see if we can structure our twitter system a bit differently in order to support such large amounts of data.
-
-Elasticsearch supports multiple indices, as well as multiple types per index. In the previous example we used an index called @twitter@, with two types, @user@ and @tweet@.
-
-Another way to define our simple twitter system is to have a different index per user (note, though that each index has an overhead). Here is the indexing curl's in this case:
-
-
-curl -XPUT 'http://localhost:9200/kimchy/info/1' -d '{ "name" : "Shay Banon" }'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/1' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T13:12:00",
- "message": "Trying out Elasticsearch, so far so good?"
-}'
-
-curl -XPUT 'http://localhost:9200/kimchy/tweet/2' -d '
-{
- "user": "kimchy",
- "postDate": "2009-11-15T14:12:12",
- "message": "Another tweet, will it be indexed?"
-}'
-
-
-The above will index information into the @kimchy@ index, with two types, @info@ and @tweet@. Each user will get his own special index.
-
-Complete control on the index level is allowed. As an example, in the above case, we would want to change from the default 5 shards with 1 replica per index, to only 1 shard with 1 replica per index (== per twitter user). Here is how this can be done (the configuration can be in yaml as well):
-
-
-curl -XPUT http://localhost:9200/another_user/ -d '
-{
- "index" : {
- "numberOfShards" : 1,
- "numberOfReplicas" : 1
- }
-}'
-
-
-Search (and similar operations) are multi index aware. This means that we can easily search on more than one
-index (twitter user), for example:
-
-
-curl -XGET 'http://localhost:9200/kimchy,another_user/_search?pretty=true' -d '
-{
- "query" : {
- "matchAll" : {}
- }
-}'
-
-
-Or on all the indices:
-
-
-curl -XGET 'http://localhost:9200/_search?pretty=true' -d '
-{
- "query" : {
- "matchAll" : {}
- }
-}'
-
-
-{One liner teaser}: And the cool part about that? You can easily search on multiple twitter users (indices), with different boost levels per user (index), making social search so much simpler (results from my friends rank higher than results from friends of my friends).
-
-h3. Distributed, Highly Available
-
-Let's face it, things will fail....
-
-Elasticsearch is a highly available and distributed search engine. Each index is broken down into shards, and each shard can have one or more replica. By default, an index is created with 5 shards and 1 replica per shard (5/1). There are many topologies that can be used, including 1/10 (improve search performance), or 20/1 (improve indexing performance, with search executed in a map reduce fashion across shards).
-
-In order to play with the distributed nature of Elasticsearch, simply bring more nodes up and shut down nodes. The system will continue to serve requests (make sure you use the correct http port) with the latest data indexed.
-
-h3. Where to go from here?
-
-We have just covered a very small portion of what Elasticsearch is all about. For more information, please refer to the "elastic.co":http://www.elastic.co/products/elasticsearch website.
-
-h3. Building from Source
-
-Elasticsearch uses "Maven":http://maven.apache.org for its build system.
-
-In order to create a distribution, simply run the @mvn clean package
--DskipTests@ command in the cloned directory.
-
-The distribution will be created under @target/releases@.
-
-See the "TESTING":TESTING.asciidoc file for more information about
-running the Elasticsearch test suite.
-
-h3. Upgrading to Elasticsearch 1.x?
-
-In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "setup reference":https://www.elastic.co/guide/en/elasticsearch/reference/current/setup-upgrade.html for more details on the upgrade process.
-
-h1. License
-
-
-This software is licensed under the Apache License, version 2 ("ALv2"), quoted below.
-
-Copyright 2009-2016 Elasticsearch
-
-Licensed under the Apache License, Version 2.0 (the "License"); you may not
-use this file except in compliance with the License. You may obtain a copy of
-the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-License for the specific language governing permissions and limitations under
-the License.
-
diff --git a/core/build.gradle b/core/build.gradle
index ab3754e72ff..6fd8c62af3e 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -24,6 +24,16 @@ import org.elasticsearch.gradle.BuildPlugin
apply plugin: 'elasticsearch.build'
apply plugin: 'com.bmuschko.nexus'
apply plugin: 'nebula.optional-base'
+apply plugin: 'nebula.maven-base-publish'
+apply plugin: 'nebula.maven-scm'
+
+publishing {
+ publications {
+ nebula {
+ artifactId 'elasticsearch'
+ }
+ }
+}
archivesBaseName = 'elasticsearch'
@@ -46,14 +56,14 @@ dependencies {
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
- compile 'org.elasticsearch:securesm:1.0'
+ compile 'org.elasticsearch:securesm:1.1'
// utilities
- compile 'net.sf.jopt-simple:jopt-simple:4.9'
+ compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
compile 'com.carrotsearch:hppc:0.7.1'
// time handling, remove with java 8 time
- compile 'joda-time:joda-time:2.8.2'
+ compile 'joda-time:joda-time:2.9.4'
// joda 2.0 moved to using volatile fields for datetime
// When updating to a new version, make sure to update our copy of BaseDateTime
compile 'org.joda:joda-convert:1.2'
@@ -65,7 +75,7 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
// network stack
- compile 'io.netty:netty:3.10.5.Final'
+ compile 'io.netty:netty:3.10.6.Final'
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// percentile ranks aggregation
@@ -79,7 +89,7 @@ dependencies {
compile "log4j:log4j:${versions.log4j}", optional
compile "log4j:apache-log4j-extras:${versions.log4j}", optional
- compile "net.java.dev.jna:jna:${versions.jna}", optional
+ compile "net.java.dev.jna:jna:${versions.jna}"
if (isEclipse == false || project.path == ":core-tests") {
testCompile("org.elasticsearch.test:framework:${version}") {
@@ -111,6 +121,36 @@ forbiddenPatterns {
exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
}
+task generateModulesList {
+ List<String> modules = project(':modules').subprojects.collect { it.name }
+ File modulesFile = new File(buildDir, 'generated-resources/modules.txt')
+ processResources.from(modulesFile)
+ inputs.property('modules', modules)
+ outputs.file(modulesFile)
+ doLast {
+ modulesFile.parentFile.mkdirs()
+ modulesFile.setText(modules.join('\n'), 'UTF-8')
+ }
+}
+
+task generatePluginsList {
+ List<String> plugins = project(':plugins').subprojects
+ .findAll { it.name.contains('example') == false }
+ .collect { it.name }
+ File pluginsFile = new File(buildDir, 'generated-resources/plugins.txt')
+ processResources.from(pluginsFile)
+ inputs.property('plugins', plugins)
+ outputs.file(pluginsFile)
+ doLast {
+ pluginsFile.parentFile.mkdirs()
+ pluginsFile.setText(plugins.join('\n'), 'UTF-8')
+ }
+}
+
+processResources {
+ dependsOn generateModulesList, generatePluginsList
+}
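
The two tasks above write the module and plugin names into generated-resources, and the processResources wiring ships those files at the root of the core jar. A hypothetical consumer reading one of them back from the classpath (SomeClass and the surrounding snippet are illustrative, not part of this change):

    // Hypothetical reader for the generated list; assumes UTF-8 as written above.
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            SomeClass.class.getResourceAsStream("/modules.txt"), StandardCharsets.UTF_8))) {
        List<String> modules = reader.lines().collect(Collectors.toList());
    }
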
+
thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java b/core/src/main/java/org/apache/log4j/Java9Hack.java
similarity index 63%
rename from core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java
rename to core/src/main/java/org/apache/log4j/Java9Hack.java
index 094a29cd6b1..831cf5b35ae 100644
--- a/core/src/main/java/org/elasticsearch/search/query/QueryParseElement.java
+++ b/core/src/main/java/org/apache/log4j/Java9Hack.java
@@ -17,19 +17,21 @@
* under the License.
*/
-package org.elasticsearch.search.query;
+package org.apache.log4j;
-import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.search.SearchParseElement;
-import org.elasticsearch.search.internal.SearchContext;
+import org.apache.log4j.helpers.ThreadLocalMap;
/**
+ * Log4j 1.2 MDC breaks because it parses java.version incorrectly (does not handle new java9 versioning).
*
+ * This hack fixes up the pkg private members as if it had detected the java version correctly.
*/
-public class QueryParseElement implements SearchParseElement {
+public class Java9Hack {
- @Override
- public void parse(XContentParser parser, SearchContext context) throws Exception {
- context.parsedQuery(context.getQueryShardContext().parse(parser));
+ public static void fixLog4j() {
+ if (MDC.mdc.tlm == null) {
+ MDC.mdc.java1 = false;
+ MDC.mdc.tlm = new ThreadLocalMap();
+ }
}
}
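
The hack only patches MDC's package-private fields; something still has to call it before log4j is first touched on a Java 9 runtime. A hypothetical call site (the version check shown is an assumption, not part of this change):

    // Hypothetical bootstrap guard: apply the fix before the first logger is created.
    if (System.getProperty("java.specification.version").startsWith("9")) {
        org.apache.log4j.Java9Hack.fixLog4j();
    }
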
diff --git a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java b/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java
deleted file mode 100644
index 580b875ce2c..00000000000
--- a/core/src/main/java/org/apache/lucene/document/XInetAddressPoint.java
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.lucene.document;
-
-import java.net.InetAddress;
-import java.net.UnknownHostException;
-import java.util.Arrays;
-
-import org.apache.lucene.search.Query;
-import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.common.SuppressForbidden;
-
-/**
- * Forked utility methods from Lucene's InetAddressPoint until LUCENE-7232 and
- * LUCENE-7234 are released.
- */
-// TODO: remove me when we upgrade to Lucene 6.1
-@SuppressForbidden(reason="uses InetAddress.getHostAddress")
-public final class XInetAddressPoint {
-
- private XInetAddressPoint() {}
-
- /** The minimum value that an ip address can hold. */
- public static final InetAddress MIN_VALUE;
- /** The maximum value that an ip address can hold. */
- public static final InetAddress MAX_VALUE;
- static {
- MIN_VALUE = InetAddressPoint.decode(new byte[InetAddressPoint.BYTES]);
- byte[] maxValueBytes = new byte[InetAddressPoint.BYTES];
- Arrays.fill(maxValueBytes, (byte) 0xFF);
- MAX_VALUE = InetAddressPoint.decode(maxValueBytes);
- }
-
- /**
- * Return the {@link InetAddress} that compares immediately greater than
- * {@code address}.
- * @throws ArithmeticException if the provided address is the
- * {@link #MAX_VALUE maximum ip address}
- */
- public static InetAddress nextUp(InetAddress address) {
- if (address.equals(MAX_VALUE)) {
- throw new ArithmeticException("Overflow: there is no greater InetAddress than "
- + address.getHostAddress());
- }
- byte[] delta = new byte[InetAddressPoint.BYTES];
- delta[InetAddressPoint.BYTES-1] = 1;
- byte[] nextUpBytes = new byte[InetAddressPoint.BYTES];
- NumericUtils.add(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextUpBytes);
- return InetAddressPoint.decode(nextUpBytes);
- }
-
- /**
- * Return the {@link InetAddress} that compares immediately less than
- * {@code address}.
- * @throws ArithmeticException if the provided address is the
- * {@link #MIN_VALUE minimum ip address}
- */
- public static InetAddress nextDown(InetAddress address) {
- if (address.equals(MIN_VALUE)) {
- throw new ArithmeticException("Underflow: there is no smaller InetAddress than "
- + address.getHostAddress());
- }
- byte[] delta = new byte[InetAddressPoint.BYTES];
- delta[InetAddressPoint.BYTES-1] = 1;
- byte[] nextDownBytes = new byte[InetAddressPoint.BYTES];
- NumericUtils.subtract(InetAddressPoint.BYTES, 0, InetAddressPoint.encode(address), delta, nextDownBytes);
- return InetAddressPoint.decode(nextDownBytes);
- }
-
- /**
- * Create a prefix query for matching a CIDR network range.
- *
- * @param field field name. must not be {@code null}.
- * @param value any host address
- * @param prefixLength the network prefix length for this address. This is also known as the subnet mask in the context of IPv4
- * addresses.
- * @throws IllegalArgumentException if {@code field} is null, or prefixLength is invalid.
- * @return a query matching documents with addresses contained within this network
- */
- // TODO: remove me when we upgrade to Lucene 6.0.1
- public static Query newPrefixQuery(String field, InetAddress value, int prefixLength) {
- if (value == null) {
- throw new IllegalArgumentException("InetAddress must not be null");
- }
- if (prefixLength < 0 || prefixLength > 8 * value.getAddress().length) {
- throw new IllegalArgumentException("illegal prefixLength '" + prefixLength
- + "'. Must be 0-32 for IPv4 ranges, 0-128 for IPv6 ranges");
- }
- // create the lower value by zeroing out the host portion, upper value by filling it with all ones.
- byte lower[] = value.getAddress();
- byte upper[] = value.getAddress();
- for (int i = prefixLength; i < 8 * lower.length; i++) {
- int m = 1 << (7 - (i & 7));
- lower[i >> 3] &= ~m;
- upper[i >> 3] |= m;
- }
- try {
- return InetAddressPoint.newRangeQuery(field, InetAddress.getByAddress(lower), InetAddress.getByAddress(upper));
- } catch (UnknownHostException e) {
- throw new AssertionError(e); // values are coming from InetAddress
- }
- }
-}
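
With the Lucene upgrade elsewhere in this change, the stock InetAddressPoint now covers everything the fork provided, so callers can switch to it directly. A short sketch (java.net and org.apache.lucene imports assumed):

    InetAddress value = InetAddress.getByName("192.168.0.0");
    Query cidr = InetAddressPoint.newPrefixQuery("ip", value, 24); // matches 192.168.0.0/24
    InetAddress up = InetAddressPoint.nextUp(value);               // 192.168.0.1
    InetAddress down = InetAddressPoint.nextDown(value);           // 192.167.255.255
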
diff --git a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
index 564f780b8ed..a4b94b007fd 100644
--- a/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
+++ b/core/src/main/java/org/apache/lucene/queries/BlendedTermQuery.java
@@ -283,7 +283,7 @@ public abstract class BlendedTermQuery extends Query {
@Override
public boolean equals(Object o) {
if (this == o) return true;
- if (!super.equals(o)) return false;
+ if (sameClassAs(o) == false) return false;
BlendedTermQuery that = (BlendedTermQuery) o;
return Arrays.equals(equalsTerms(), that.equalsTerms());
@@ -291,7 +291,7 @@ public abstract class BlendedTermQuery extends Query {
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), Arrays.hashCode(equalsTerms()));
+ return Objects.hash(classHash(), Arrays.hashCode(equalsTerms()));
}
public static BlendedTermQuery booleanBlendedQuery(Term[] terms, final boolean disableCoord) {
diff --git a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
index 86982bfc949..a8b7dc9299f 100644
--- a/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
+++ b/core/src/main/java/org/apache/lucene/queries/MinDocQuery.java
@@ -44,12 +44,12 @@ public final class MinDocQuery extends Query {
@Override
public int hashCode() {
- return Objects.hash(super.hashCode(), minDoc);
+ return Objects.hash(classHash(), minDoc);
}
@Override
public boolean equals(Object obj) {
- if (super.equals(obj) == false) {
+ if (sameClassAs(obj) == false) {
return false;
}
MinDocQuery that = (MinDocQuery) obj;
diff --git a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
index a9327d785e1..c65f962dbb8 100644
--- a/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
+++ b/core/src/main/java/org/apache/lucene/search/suggest/analyzing/XAnalyzingSuggester.java
@@ -63,9 +63,6 @@ import org.elasticsearch.common.io.PathUtils;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
@@ -622,8 +619,12 @@ public long ramBytesUsed() {
Set<BytesRef> seenSurfaceForms = new HashSet<>();
int dedup = 0;
- while (reader.read(scratch)) {
- input.reset(scratch.bytes(), 0, scratch.length());
+ while (true) {
+ BytesRef bytes = reader.next();
+ if (bytes == null) {
+ break;
+ }
+ input.reset(bytes.bytes, bytes.offset, bytes.length);
short analyzedLength = input.readShort();
analyzed.grow(analyzedLength+2);
input.readBytes(analyzed.bytes(), 0, analyzedLength);
@@ -631,13 +632,13 @@ public long ramBytesUsed() {
long cost = input.readInt();
- surface.bytes = scratch.bytes();
+ surface.bytes = bytes.bytes;
if (hasPayloads) {
surface.length = input.readShort();
surface.offset = input.getPosition();
} else {
surface.offset = input.getPosition();
- surface.length = scratch.length() - surface.offset;
+ surface.length = bytes.length - surface.offset;
}
if (previousAnalyzed == null) {
@@ -679,11 +680,11 @@ public long ramBytesUsed() {
builder.add(scratchInts.get(), outputs.newPair(cost, BytesRef.deepCopyOf(surface)));
} else {
int payloadOffset = input.getPosition() + surface.length;
- int payloadLength = scratch.length() - payloadOffset;
+ int payloadLength = bytes.length - payloadOffset;
BytesRef br = new BytesRef(surface.length + 1 + payloadLength);
System.arraycopy(surface.bytes, surface.offset, br.bytes, 0, surface.length);
br.bytes[surface.length] = (byte) payloadSep;
- System.arraycopy(scratch.bytes(), payloadOffset, br.bytes, surface.length+1, payloadLength);
+ System.arraycopy(bytes.bytes, payloadOffset, br.bytes, surface.length+1, payloadLength);
br.length = br.bytes.length;
builder.add(scratchInts.get(), outputs.newPair(cost, br));
}
@@ -1109,7 +1110,7 @@ public long ramBytesUsed() {
this.analyzed.copyBytes(analyzed);
}
- private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
+ private static final class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;
diff --git a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
index ea504f7688c..e1ae7b938b3 100644
--- a/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
+++ b/core/src/main/java/org/apache/lucene/store/StoreRateLimiting.java
@@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
*/
public class StoreRateLimiting {
- public static interface Provider {
+ public interface Provider {
StoreRateLimiting rateLimiting();
}
diff --git a/core/src/main/java/org/elasticsearch/Build.java b/core/src/main/java/org/elasticsearch/Build.java
index f844e3b4040..25da5f28166 100644
--- a/core/src/main/java/org/elasticsearch/Build.java
+++ b/core/src/main/java/org/elasticsearch/Build.java
@@ -19,16 +19,11 @@
package org.elasticsearch;
-import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
-import java.net.URISyntaxException;
import java.net.URL;
-import java.nio.file.Files;
-import java.nio.file.Path;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
@@ -47,9 +42,9 @@ public class Build {
final String date;
final boolean isSnapshot;
- Path path = getElasticsearchCodebase();
- if (path.toString().endsWith(".jar")) {
- try (JarInputStream jar = new JarInputStream(Files.newInputStream(path))) {
+ final URL url = getElasticsearchCodebase();
+ if (url.toString().endsWith(".jar")) {
+ try (JarInputStream jar = new JarInputStream(url.openStream())) {
Manifest manifest = jar.getManifest();
shortHash = manifest.getMainAttributes().getValue("Change");
date = manifest.getMainAttributes().getValue("Build-Date");
@@ -80,14 +75,8 @@ public class Build {
/**
* Returns path to elasticsearch codebase path
*/
- @SuppressForbidden(reason = "looks up path of elasticsearch.jar directly")
- static Path getElasticsearchCodebase() {
- URL url = Build.class.getProtectionDomain().getCodeSource().getLocation();
- try {
- return PathUtils.get(url.toURI());
- } catch (URISyntaxException bogus) {
- throw new RuntimeException(bogus);
- }
+ static URL getElasticsearchCodebase() {
+ return Build.class.getProtectionDomain().getCodeSource().getLocation();
}
private String shortHash;
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchException.java b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
index 3332bfed0c3..54bbfc851d2 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -21,18 +21,18 @@ package org.elasticsearch;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
-import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.LoggerMessageFormat;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.transport.TcpTransport;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -47,7 +47,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_UUID_NA_VAL
/**
* A base class for all elasticsearch exceptions.
*/
-public class ElasticsearchException extends RuntimeException implements ToXContent {
+public class ElasticsearchException extends RuntimeException implements ToXContent, Writeable {
public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.cause.skip";
public static final String REST_EXCEPTION_SKIP_STACK_TRACE = "rest.exception.stacktrace.skip";
@@ -99,18 +99,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}
public ElasticsearchException(StreamInput in) throws IOException {
- super(in.readOptionalString(), in.readThrowable());
+ super(in.readOptionalString(), in.readException());
readStackTrace(this, in);
- int numKeys = in.readVInt();
- for (int i = 0; i < numKeys; i++) {
- final String key = in.readString();
- final int numValues = in.readVInt();
- final ArrayList<String> values = new ArrayList<>(numValues);
- for (int j = 0; j < numValues; j++) {
- values.add(in.readString());
- }
- headers.put(key, values);
- }
+ headers.putAll(in.readMapOfLists());
}
/**
@@ -161,7 +152,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
* Unwraps the actual cause from the exception for cases when the exception is a
* {@link ElasticsearchWrapperException}.
*
- * @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable)
+ * @see ExceptionsHelper#unwrapCause(Throwable)
*/
public Throwable unwrapCause() {
return ExceptionsHelper.unwrapCause(this);
@@ -200,53 +191,12 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return rootCause;
}
- /**
- * Check whether this exception contains an exception of the given type:
- * either it is of the given class itself or it contains a nested cause
- * of the given type.
- *
- * @param exType the exception type to look for
- * @return whether there is a nested exception of the specified type
- */
- public boolean contains(Class<? extends Throwable> exType) {
- if (exType == null) {
- return false;
- }
- if (exType.isInstance(this)) {
- return true;
- }
- Throwable cause = getCause();
- if (cause == this) {
- return false;
- }
- if (cause instanceof ElasticsearchException) {
- return ((ElasticsearchException) cause).contains(exType);
- } else {
- while (cause != null) {
- if (exType.isInstance(cause)) {
- return true;
- }
- if (cause.getCause() == cause) {
- break;
- }
- cause = cause.getCause();
- }
- return false;
- }
- }
-
+ @Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(this.getMessage());
- out.writeThrowable(this.getCause());
+ out.writeException(this.getCause());
writeStackTraces(this, out);
- out.writeVInt(headers.size());
- for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
- out.writeString(entry.getKey());
- out.writeVInt(entry.getValue().size());
- for (String v : entry.getValue()) {
- out.writeString(v);
- }
- }
+ out.writeMapOfLists(headers);
}
public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
@@ -448,7 +398,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
int numSuppressed = in.readVInt();
for (int i = 0; i < numSuppressed; i++) {
- throwable.addSuppressed(in.readThrowable());
+ throwable.addSuppressed(in.readException());
}
return throwable;
}
@@ -468,7 +418,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
Throwable[] suppressed = throwable.getSuppressed();
out.writeVInt(suppressed.length);
for (Throwable t : suppressed) {
- out.writeThrowable(t);
+ out.writeException(t);
}
return throwable;
}
@@ -530,7 +480,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
org.elasticsearch.search.SearchContextMissingException::new, 24),
- SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 25),
+ GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
+ org.elasticsearch.script.GeneralScriptException::new, 25),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
SNAPSHOT_CREATION_EXCEPTION(org.elasticsearch.snapshots.SnapshotCreationException.class,
@@ -679,8 +630,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.shard.IndexShardRecoveryException::new, 106),
REPOSITORY_MISSING_EXCEPTION(org.elasticsearch.repositories.RepositoryMissingException.class,
org.elasticsearch.repositories.RepositoryMissingException::new, 107),
- PERCOLATOR_EXCEPTION(org.elasticsearch.index.percolator.PercolatorException.class,
- org.elasticsearch.index.percolator.PercolatorException::new, 108),
DOCUMENT_SOURCE_MISSING_EXCEPTION(org.elasticsearch.index.engine.DocumentSourceMissingException.class,
org.elasticsearch.index.engine.DocumentSourceMissingException::new, 109),
FLUSH_NOT_ALLOWED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.FlushNotAllowedEngineException.class,
@@ -711,8 +660,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
org.elasticsearch.script.Script.ScriptParseException::new, 124),
- HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
- org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
+ HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
+ TcpTransport.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
org.elasticsearch.index.mapper.MapperParsingException::new, 126),
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
@@ -742,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
QUERY_SHARD_EXCEPTION(org.elasticsearch.index.query.QueryShardException.class,
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
- ShardStateAction.NoLongerPrimaryShardException::new, 142);
+ ShardStateAction.NoLongerPrimaryShardException::new, 142),
+ SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
final Class<? extends ElasticsearchException> exceptionClass;
@@ -827,9 +777,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return null;
}
- public static void renderThrowable(XContentBuilder builder, Params params, Throwable t) throws IOException {
+ public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException {
builder.startObject("error");
- final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
+ final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
builder.field("root_cause");
builder.startArray();
for (ElasticsearchException rootCause : rootCauses) {
@@ -839,7 +789,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
builder.endObject();
}
builder.endArray();
- ElasticsearchException.toXContent(builder, params, t);
+ ElasticsearchException.toXContent(builder, params, e);
builder.endObject();
}
diff --git a/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java b/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java
index f4878fe6f9b..b6cd420c856 100644
--- a/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java
+++ b/core/src/main/java/org/elasticsearch/ElasticsearchSecurityException.java
@@ -36,7 +36,7 @@ public class ElasticsearchSecurityException extends ElasticsearchException {
this.status = status ;
}
- public ElasticsearchSecurityException(String msg, Throwable cause, Object... args) {
+ public ElasticsearchSecurityException(String msg, Exception cause, Object... args) {
this(msg, ExceptionsHelper.status(cause), cause, args);
}
diff --git a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
index 3842ab4e3bf..e2af52ccd2c 100644
--- a/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
+++ b/core/src/main/java/org/elasticsearch/ExceptionsHelper.java
@@ -37,25 +37,22 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
-/**
- *
- */
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
- public static RuntimeException convertToRuntime(Throwable t) {
- if (t instanceof RuntimeException) {
- return (RuntimeException) t;
+ public static RuntimeException convertToRuntime(Exception e) {
+ if (e instanceof RuntimeException) {
+ return (RuntimeException) e;
}
- return new ElasticsearchException(t);
+ return new ElasticsearchException(e);
}
- public static ElasticsearchException convertToElastic(Throwable t) {
- if (t instanceof ElasticsearchException) {
- return (ElasticsearchException) t;
+ public static ElasticsearchException convertToElastic(Exception e) {
+ if (e instanceof ElasticsearchException) {
+ return (ElasticsearchException) e;
}
- return new ElasticsearchException(t);
+ return new ElasticsearchException(e);
}
public static RestStatus status(Throwable t) {
@@ -89,15 +86,14 @@ public final class ExceptionsHelper {
return result;
}
+ /**
+ * @deprecated Don't swallow exceptions, allow them to propagate.
+ */
+ @Deprecated
public static String detailedMessage(Throwable t) {
- return detailedMessage(t, false, 0);
- }
-
- public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
if (t == null) {
return "Unknown";
}
- int counter = initialCounter + 1;
if (t.getCause() != null) {
StringBuilder sb = new StringBuilder();
while (t != null) {
@@ -107,21 +103,11 @@ public final class ExceptionsHelper {
sb.append(t.getMessage());
sb.append("]");
}
- if (!newLines) {
- sb.append("; ");
- }
+ sb.append("; ");
t = t.getCause();
if (t != null) {
- if (newLines) {
- sb.append("\n");
- for (int i = 0; i < counter; i++) {
- sb.append("\t");
- }
- } else {
- sb.append("nested: ");
- }
+ sb.append("nested: ");
}
- counter++;
}
return sb.toString();
} else {
@@ -175,8 +161,8 @@ public final class ExceptionsHelper {
}
public static IOException unwrapCorruption(Throwable t) {
- return (IOException) unwrap(t, CorruptIndexException.class,
- IndexFormatTooOldException.class,
+ return (IOException) unwrap(t, CorruptIndexException.class,
+ IndexFormatTooOldException.class,
IndexFormatTooNewException.class);
}
@@ -220,7 +206,6 @@ public final class ExceptionsHelper {
return true;
}
-
/**
* Deduplicate the failures by exception message and index.
*/
diff --git a/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java
index d38de2e3bc1..d408fdef033 100644
--- a/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java
+++ b/core/src/main/java/org/elasticsearch/ResourceNotFoundException.java
@@ -32,7 +32,7 @@ public class ResourceNotFoundException extends ElasticsearchException {
super(msg, args);
}
- protected ResourceNotFoundException(String msg, Throwable cause, Object... args) {
+ public ResourceNotFoundException(String msg, Throwable cause, Object... args) {
super(msg, cause, args);
}
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index 56d245ddc51..da876730b9a 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -22,7 +22,6 @@ package org.elasticsearch;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
-import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
@@ -32,7 +31,6 @@ import java.io.IOException;
/**
*/
-@SuppressWarnings("deprecation")
public class Version {
/*
* The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA
@@ -69,11 +67,21 @@ public class Version {
public static final Version V_2_3_1 = new Version(V_2_3_1_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_2_ID = 2030299;
public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+ public static final int V_2_3_3_ID = 2030399;
+ public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+ public static final int V_2_3_4_ID = 2030499;
+ public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
public static final Version V_5_0_0_alpha2 = new Version(V_5_0_0_alpha2_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
- public static final Version CURRENT = V_5_0_0_alpha2;
+ public static final int V_5_0_0_alpha3_ID = 5000003;
+ public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
+ public static final int V_5_0_0_alpha4_ID = 5000004;
+ public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
+ public static final int V_5_0_0_alpha5_ID = 5000005;
+ public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
+ public static final Version CURRENT = V_5_0_0_alpha5;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@@ -86,10 +94,20 @@ public class Version {
public static Version fromId(int id) {
switch (id) {
+ case V_5_0_0_alpha5_ID:
+ return V_5_0_0_alpha5;
+ case V_5_0_0_alpha4_ID:
+ return V_5_0_0_alpha4;
+ case V_5_0_0_alpha3_ID:
+ return V_5_0_0_alpha3;
case V_5_0_0_alpha2_ID:
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
+ case V_2_3_4_ID:
+ return V_2_3_4;
+ case V_2_3_3_ID:
+ return V_2_3_3;
case V_2_3_2_ID:
return V_2_3_2;
case V_2_3_1_ID:
@@ -318,18 +336,4 @@ public class Version {
public boolean isRC() {
return build > 50 && build < 99;
}
-
- public static class Module extends AbstractModule {
-
- private final Version version;
-
- public Module(Version version) {
- this.version = version;
- }
-
- @Override
- protected void configure() {
- bind(Version.class).toInstance(version);
- }
- }
}
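
Decoding the XXYYZZAA ids used above: two digits each for major, minor, and revision, plus a two-digit build indicator where 99 marks a GA release and low values mark the pre-release ordinal (isRC above claims 51-98). A worked example using constants from this file:

    int id = 2030499;                 // V_2_3_4_ID
    int major = id / 1000000;         // 2
    int minor = (id / 10000) % 100;   // 3
    int revision = (id / 100) % 100;  // 4
    int build = id % 100;             // 99 -> GA; compare 5000005 -> 5.0.0-alpha5
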
diff --git a/core/src/main/java/org/elasticsearch/action/ActionListener.java b/core/src/main/java/org/elasticsearch/action/ActionListener.java
index 8447d6cef08..e7d5ecd8d64 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionListener.java
@@ -32,5 +32,5 @@ public interface ActionListener<Response> {
/**
* A failure caused by an exception at some phase of the task.
*/
- void onFailure(Throwable e);
+ void onFailure(Exception e);
}
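
Narrowing onFailure from Throwable to Exception means fatal Errors now propagate instead of being handed to listeners. A minimal sketch of a listener under the new signature (the Boolean response type is illustrative):

    ActionListener<Boolean> listener = new ActionListener<Boolean>() {
        @Override
        public void onResponse(Boolean acknowledged) {
            // handle the successful response
        }

        @Override
        public void onFailure(Exception e) { // previously onFailure(Throwable e)
            // handle the failure; Errors are no longer delivered here
        }
    };
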
diff --git a/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
index 6cdc1c3194f..e0e04652315 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
@@ -20,7 +20,7 @@
package org.elasticsearch.action;
import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponse;
@@ -31,7 +31,7 @@ import java.util.function.Supplier;
* A simple base class for action response listeners, defaulting to using the SAME executor (as its
* very common on response handlers).
*/
-public class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {
+public class ActionListenerResponseHandler<Response extends TransportResponse> implements TransportResponseHandler<Response> {
private final ActionListener<Response> listener;
private final Supplier<Response> responseSupplier;
diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java
index 3e93f699645..46494f4ebdc 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -19,6 +19,12 @@
package org.elasticsearch.action;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
@@ -32,6 +38,8 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskAction;
+import org.elasticsearch.action.admin.cluster.node.tasks.get.TransportGetTaskAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction;
import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction;
@@ -62,10 +70,14 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction;
+import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
-import org.elasticsearch.action.admin.cluster.validate.template.RenderSearchTemplateAction;
-import org.elasticsearch.action.admin.cluster.validate.template.TransportRenderSearchTemplateAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.TransportIndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.exists.AliasesExistAction;
@@ -107,6 +119,8 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction;
import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction;
+import org.elasticsearch.action.admin.indices.rollover.RolloverAction;
+import org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction;
import org.elasticsearch.action.admin.indices.segments.TransportIndicesSegmentsAction;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
@@ -115,6 +129,8 @@ import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettin
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
+import org.elasticsearch.action.admin.indices.shrink.ShrinkAction;
+import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
@@ -139,7 +155,7 @@ import org.elasticsearch.action.delete.TransportDeleteAction;
import org.elasticsearch.action.explain.ExplainAction;
import org.elasticsearch.action.explain.TransportExplainAction;
import org.elasticsearch.action.fieldstats.FieldStatsAction;
-import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction;
+import org.elasticsearch.action.fieldstats.TransportFieldStatsAction;
import org.elasticsearch.action.get.GetAction;
import org.elasticsearch.action.get.MultiGetAction;
import org.elasticsearch.action.get.TransportGetAction;
@@ -147,28 +163,18 @@ import org.elasticsearch.action.get.TransportMultiGetAction;
import org.elasticsearch.action.get.TransportShardMultiGetAction;
import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.TransportIndexAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.TransportDeleteStoredScriptAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.TransportGetStoredScriptAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptAction;
-import org.elasticsearch.action.admin.cluster.storedscripts.TransportPutStoredScriptAction;
-import org.elasticsearch.action.ingest.IngestActionFilter;
-import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.DeletePipelineAction;
import org.elasticsearch.action.ingest.DeletePipelineTransportAction;
import org.elasticsearch.action.ingest.GetPipelineAction;
import org.elasticsearch.action.ingest.GetPipelineTransportAction;
+import org.elasticsearch.action.ingest.IngestActionFilter;
+import org.elasticsearch.action.ingest.IngestProxyActionFilter;
import org.elasticsearch.action.ingest.PutPipelineAction;
import org.elasticsearch.action.ingest.PutPipelineTransportAction;
import org.elasticsearch.action.ingest.SimulatePipelineAction;
import org.elasticsearch.action.ingest.SimulatePipelineTransportAction;
import org.elasticsearch.action.main.MainAction;
import org.elasticsearch.action.main.TransportMainAction;
-import org.elasticsearch.action.percolate.MultiPercolateAction;
-import org.elasticsearch.action.percolate.PercolateAction;
-import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
-import org.elasticsearch.action.percolate.TransportPercolateAction;
import org.elasticsearch.action.search.ClearScrollAction;
import org.elasticsearch.action.search.MultiSearchAction;
import org.elasticsearch.action.search.SearchAction;
@@ -189,189 +195,470 @@ import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction
import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateAction;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.ActionPlugin;
+import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
+import org.elasticsearch.rest.RestController;
+import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.rest.action.admin.cluster.allocation.RestClusterAllocationExplainAction;
+import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
+import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
+import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
+import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestCancelTasksAction;
+import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestGetTaskAction;
+import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
+import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
+import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
+import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
+import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
+import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
+import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction;
+import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction;
+import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
+import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
+import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
+import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
+import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
+import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
+import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
+import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
+import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
+import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
+import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
+import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
+import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
+import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
+import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
+import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
+import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
+import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
+import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
+import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
+import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
+import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
+import org.elasticsearch.rest.action.bulk.RestBulkAction;
+import org.elasticsearch.rest.action.cat.AbstractCatAction;
+import org.elasticsearch.rest.action.cat.RestAliasAction;
+import org.elasticsearch.rest.action.cat.RestAllocationAction;
+import org.elasticsearch.rest.action.cat.RestCatAction;
+import org.elasticsearch.rest.action.cat.RestFielddataAction;
+import org.elasticsearch.rest.action.cat.RestHealthAction;
+import org.elasticsearch.rest.action.cat.RestIndicesAction;
+import org.elasticsearch.rest.action.cat.RestMasterAction;
+import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
+import org.elasticsearch.rest.action.cat.RestNodesAction;
+import org.elasticsearch.rest.action.cat.RestPluginsAction;
+import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
+import org.elasticsearch.rest.action.cat.RestSegmentsAction;
+import org.elasticsearch.rest.action.cat.RestShardsAction;
+import org.elasticsearch.rest.action.cat.RestSnapshotAction;
+import org.elasticsearch.rest.action.cat.RestTasksAction;
+import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
+import org.elasticsearch.rest.action.delete.RestDeleteAction;
+import org.elasticsearch.rest.action.explain.RestExplainAction;
+import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
+import org.elasticsearch.rest.action.get.RestGetAction;
+import org.elasticsearch.rest.action.get.RestGetSourceAction;
+import org.elasticsearch.rest.action.get.RestHeadAction;
+import org.elasticsearch.rest.action.get.RestMultiGetAction;
+import org.elasticsearch.rest.action.index.RestIndexAction;
+import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction;
+import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
+import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
+import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
+import org.elasticsearch.rest.action.main.RestMainAction;
+import org.elasticsearch.rest.action.search.RestClearScrollAction;
+import org.elasticsearch.rest.action.search.RestMultiSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchAction;
+import org.elasticsearch.rest.action.search.RestSearchScrollAction;
+import org.elasticsearch.rest.action.suggest.RestSuggestAction;
+import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
+import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
+import org.elasticsearch.rest.action.update.RestUpdateAction;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
+import static java.util.Collections.unmodifiableList;
+import static java.util.Collections.unmodifiableMap;
/**
- *
+ * Builds and binds the generic action map, all {@link TransportAction}s, and {@link ActionFilters}.
*/
public class ActionModule extends AbstractModule {
- private final Map<String, ActionEntry> actions = new HashMap<>();
- private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();
+ private final boolean transportClient;
+ private final Settings settings;
+ private final List<ActionPlugin> actionPlugins;
+ private final Map<String, ActionHandler<?, ?>> actions;
+ private final List<Class<? extends ActionFilter>> actionFilters;
+ private final AutoCreateIndex autoCreateIndex;
+ private final DestructiveOperations destructiveOperations;
+ private final RestController restController;
- static class ActionEntry<Request extends ActionRequest<Request>, Response extends ActionResponse> {
- public final GenericAction<Request, Response> action;
- public final Class<? extends TransportAction<Request, Response>> transportAction;
- public final Class[] supportTransportActions;
+ public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
+ ClusterSettings clusterSettings, List<ActionPlugin> actionPlugins) {
+ this.transportClient = transportClient;
+ this.settings = settings;
+ this.actionPlugins = actionPlugins;
+ actions = setupActions(actionPlugins);
+ actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
+ autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
+ destructiveOperations = new DestructiveOperations(settings, clusterSettings);
+ restController = new RestController(settings);
+ }
- ActionEntry(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
- this.action = action;
- this.transportAction = transportAction;
- this.supportTransportActions = supportTransportActions;
+ public Map<String, ActionHandler<?, ?>> getActions() {
+ return actions;
+ }
+
+ static Map<String, ActionHandler<?, ?>> setupActions(List<ActionPlugin> actionPlugins) {
+ // Subclass NamedRegistry for easy registration
+ class ActionRegistry extends NamedRegistry<ActionHandler<?, ?>> {
+ public ActionRegistry() {
+ super("action");
+ }
+
+ public void register(ActionHandler<?, ?> handler) {
+ register(handler.getAction().name(), handler);
+ }
+
+ public <Request extends ActionRequest<Request>, Response extends ActionResponse> void register(
+ GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction,
+ Class<?>... supportTransportActions) {
+ register(new ActionHandler<>(action, transportAction, supportTransportActions));
+ }
}
+ ActionRegistry actions = new ActionRegistry();
+
+ actions.register(MainAction.INSTANCE, TransportMainAction.class);
+ actions.register(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
+ actions.register(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
+ actions.register(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
+ actions.register(ListTasksAction.INSTANCE, TransportListTasksAction.class);
+ actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class);
+ actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);
+
+ actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
+ actions.register(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
+ actions.register(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
+ actions.register(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
+ actions.register(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
+ actions.register(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
+ actions.register(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
+ actions.register(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
+ actions.register(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
+ actions.register(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
+ actions.register(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
+ actions.register(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class);
+ actions.register(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
+ actions.register(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
+ actions.register(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
+ actions.register(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
+ actions.register(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class);
+
+ actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
+ actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
+ actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class);
+ actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
+ actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class);
+ actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class);
+ actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
+ actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class);
+ actions.register(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
+ actions.register(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
+ actions.register(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
+ actions.register(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
+ actions.register(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
+ actions.register(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class,
+ TransportGetFieldMappingsIndexAction.class);
+ actions.register(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
+ actions.register(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
+ actions.register(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
+ actions.register(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
+ actions.register(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
+ actions.register(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
+ actions.register(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
+ actions.register(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
+ actions.register(RefreshAction.INSTANCE, TransportRefreshAction.class);
+ actions.register(FlushAction.INSTANCE, TransportFlushAction.class);
+ actions.register(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
+ actions.register(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
+ actions.register(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
+ actions.register(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
+ actions.register(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
+ actions.register(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
+ actions.register(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
+ actions.register(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
+ actions.register(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);
+
+ actions.register(IndexAction.INSTANCE, TransportIndexAction.class);
+ actions.register(GetAction.INSTANCE, TransportGetAction.class);
+ actions.register(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
+ actions.register(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
+ TransportShardMultiTermsVectorAction.class);
+ actions.register(DeleteAction.INSTANCE, TransportDeleteAction.class);
+ actions.register(UpdateAction.INSTANCE, TransportUpdateAction.class);
+ actions.register(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
+ TransportShardMultiGetAction.class);
+ actions.register(BulkAction.INSTANCE, TransportBulkAction.class,
+ TransportShardBulkAction.class);
+ actions.register(SearchAction.INSTANCE, TransportSearchAction.class);
+ actions.register(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
+ actions.register(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
+ actions.register(ExplainAction.INSTANCE, TransportExplainAction.class);
+ actions.register(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
+ actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
+
+ //Indexed scripts
+ actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
+ actions.register(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
+ actions.register(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);
+
+ actions.register(FieldStatsAction.INSTANCE, TransportFieldStatsAction.class);
+
+ actions.register(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
+ actions.register(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
+ actions.register(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
+ actions.register(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);
+
+ actionPlugins.stream().flatMap(p -> p.getActions().stream()).forEach(actions::register);
+
+ return unmodifiableMap(actions.getRegistry());
}
- private final boolean ingestEnabled;
- private final boolean proxy;
+ private List<Class<? extends ActionFilter>> setupActionFilters(List<ActionPlugin> actionPlugins, boolean ingestEnabled) {
+ List<Class<? extends ActionFilter>> filters = new ArrayList<>();
+ if (transportClient == false) {
+ if (ingestEnabled) {
+ filters.add(IngestActionFilter.class);
+ } else {
+ filters.add(IngestProxyActionFilter.class);
+ }
+ }
- public ActionModule(boolean ingestEnabled, boolean proxy) {
- this.ingestEnabled = ingestEnabled;
- this.proxy = proxy;
+ for (ActionPlugin plugin : actionPlugins) {
+ filters.addAll(plugin.getActionFilters());
+ }
+ return unmodifiableList(filters);
}
- /**
- * Registers an action.
- *
- * @param action The action type.
- * @param transportAction The transport action implementing the actual action.
- * @param supportTransportActions Any support actions that are needed by the transport action.
- * @param <Request> The request type.
- * @param <Response> The response type.
- */
- public <Request extends ActionRequest<Request>, Response extends ActionResponse> void registerAction(GenericAction<Request, Response> action, Class<? extends TransportAction<Request, Response>> transportAction, Class... supportTransportActions) {
- actions.put(action.name(), new ActionEntry<>(action, transportAction, supportTransportActions));
+ static Set<Class<? extends RestHandler>> setupRestHandlers(List<ActionPlugin> actionPlugins) {
+ Set<Class<? extends RestHandler>> handlers = new HashSet<>();
+ registerRestHandler(handlers, RestMainAction.class);
+ registerRestHandler(handlers, RestNodesInfoAction.class);
+ registerRestHandler(handlers, RestNodesStatsAction.class);
+ registerRestHandler(handlers, RestNodesHotThreadsAction.class);
+ registerRestHandler(handlers, RestClusterAllocationExplainAction.class);
+ registerRestHandler(handlers, RestClusterStatsAction.class);
+ registerRestHandler(handlers, RestClusterStateAction.class);
+ registerRestHandler(handlers, RestClusterHealthAction.class);
+ registerRestHandler(handlers, RestClusterUpdateSettingsAction.class);
+ registerRestHandler(handlers, RestClusterGetSettingsAction.class);
+ registerRestHandler(handlers, RestClusterRerouteAction.class);
+ registerRestHandler(handlers, RestClusterSearchShardsAction.class);
+ registerRestHandler(handlers, RestPendingClusterTasksAction.class);
+ registerRestHandler(handlers, RestPutRepositoryAction.class);
+ registerRestHandler(handlers, RestGetRepositoriesAction.class);
+ registerRestHandler(handlers, RestDeleteRepositoryAction.class);
+ registerRestHandler(handlers, RestVerifyRepositoryAction.class);
+ registerRestHandler(handlers, RestGetSnapshotsAction.class);
+ registerRestHandler(handlers, RestCreateSnapshotAction.class);
+ registerRestHandler(handlers, RestRestoreSnapshotAction.class);
+ registerRestHandler(handlers, RestDeleteSnapshotAction.class);
+ registerRestHandler(handlers, RestSnapshotsStatusAction.class);
+
+ registerRestHandler(handlers, RestIndicesExistsAction.class);
+ registerRestHandler(handlers, RestTypesExistsAction.class);
+ registerRestHandler(handlers, RestGetIndicesAction.class);
+ registerRestHandler(handlers, RestIndicesStatsAction.class);
+ registerRestHandler(handlers, RestIndicesSegmentsAction.class);
+ registerRestHandler(handlers, RestIndicesShardStoresAction.class);
+ registerRestHandler(handlers, RestGetAliasesAction.class);
+ registerRestHandler(handlers, RestAliasesExistAction.class);
+ registerRestHandler(handlers, RestIndexDeleteAliasesAction.class);
+ registerRestHandler(handlers, RestIndexPutAliasAction.class);
+ registerRestHandler(handlers, RestIndicesAliasesAction.class);
+ registerRestHandler(handlers, RestCreateIndexAction.class);
+ registerRestHandler(handlers, RestShrinkIndexAction.class);
+ registerRestHandler(handlers, RestRolloverIndexAction.class);
+ registerRestHandler(handlers, RestDeleteIndexAction.class);
+ registerRestHandler(handlers, RestCloseIndexAction.class);
+ registerRestHandler(handlers, RestOpenIndexAction.class);
+
+ registerRestHandler(handlers, RestUpdateSettingsAction.class);
+ registerRestHandler(handlers, RestGetSettingsAction.class);
+
+ registerRestHandler(handlers, RestAnalyzeAction.class);
+ registerRestHandler(handlers, RestGetIndexTemplateAction.class);
+ registerRestHandler(handlers, RestPutIndexTemplateAction.class);
+ registerRestHandler(handlers, RestDeleteIndexTemplateAction.class);
+ registerRestHandler(handlers, RestHeadIndexTemplateAction.class);
+
+ registerRestHandler(handlers, RestPutMappingAction.class);
+ registerRestHandler(handlers, RestGetMappingAction.class);
+ registerRestHandler(handlers, RestGetFieldMappingAction.class);
+
+ registerRestHandler(handlers, RestRefreshAction.class);
+ registerRestHandler(handlers, RestFlushAction.class);
+ registerRestHandler(handlers, RestSyncedFlushAction.class);
+ registerRestHandler(handlers, RestForceMergeAction.class);
+ registerRestHandler(handlers, RestUpgradeAction.class);
+ registerRestHandler(handlers, RestClearIndicesCacheAction.class);
+
+ registerRestHandler(handlers, RestIndexAction.class);
+ registerRestHandler(handlers, RestGetAction.class);
+ registerRestHandler(handlers, RestGetSourceAction.class);
+ registerRestHandler(handlers, RestHeadAction.Document.class);
+ registerRestHandler(handlers, RestHeadAction.Source.class);
+ registerRestHandler(handlers, RestMultiGetAction.class);
+ registerRestHandler(handlers, RestDeleteAction.class);
+ registerRestHandler(handlers, org.elasticsearch.rest.action.count.RestCountAction.class);
+ registerRestHandler(handlers, RestSuggestAction.class);
+ registerRestHandler(handlers, RestTermVectorsAction.class);
+ registerRestHandler(handlers, RestMultiTermVectorsAction.class);
+ registerRestHandler(handlers, RestBulkAction.class);
+ registerRestHandler(handlers, RestUpdateAction.class);
+
+ registerRestHandler(handlers, RestSearchAction.class);
+ registerRestHandler(handlers, RestSearchScrollAction.class);
+ registerRestHandler(handlers, RestClearScrollAction.class);
+ registerRestHandler(handlers, RestMultiSearchAction.class);
+
+ registerRestHandler(handlers, RestValidateQueryAction.class);
+
+ registerRestHandler(handlers, RestExplainAction.class);
+
+ registerRestHandler(handlers, RestRecoveryAction.class);
+
+ // Scripts API
+ registerRestHandler(handlers, RestGetStoredScriptAction.class);
+ registerRestHandler(handlers, RestPutStoredScriptAction.class);
+ registerRestHandler(handlers, RestDeleteStoredScriptAction.class);
+
+ registerRestHandler(handlers, RestFieldStatsAction.class);
+
+ // Tasks API
+ registerRestHandler(handlers, RestListTasksAction.class);
+ registerRestHandler(handlers, RestGetTaskAction.class);
+ registerRestHandler(handlers, RestCancelTasksAction.class);
+
+ // Ingest API
+ registerRestHandler(handlers, RestPutPipelineAction.class);
+ registerRestHandler(handlers, RestGetPipelineAction.class);
+ registerRestHandler(handlers, RestDeletePipelineAction.class);
+ registerRestHandler(handlers, RestSimulatePipelineAction.class);
+
+ // CAT API
+ registerRestHandler(handlers, RestCatAction.class);
+ registerRestHandler(handlers, RestAllocationAction.class);
+ registerRestHandler(handlers, RestShardsAction.class);
+ registerRestHandler(handlers, RestMasterAction.class);
+ registerRestHandler(handlers, RestNodesAction.class);
+ registerRestHandler(handlers, RestTasksAction.class);
+ registerRestHandler(handlers, RestIndicesAction.class);
+ registerRestHandler(handlers, RestSegmentsAction.class);
+ // Fully qualified to prevent interference with rest.action.count.RestCountAction
+ registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestCountAction.class);
+ // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
+ registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestRecoveryAction.class);
+ registerRestHandler(handlers, RestHealthAction.class);
+ registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class);
+ registerRestHandler(handlers, RestAliasAction.class);
+ registerRestHandler(handlers, RestThreadPoolAction.class);
+ registerRestHandler(handlers, RestPluginsAction.class);
+ registerRestHandler(handlers, RestFielddataAction.class);
+ registerRestHandler(handlers, RestNodeAttrsAction.class);
+ registerRestHandler(handlers, RestRepositoriesAction.class);
+ registerRestHandler(handlers, RestSnapshotAction.class);
+ for (ActionPlugin plugin : actionPlugins) {
+ for (Class<? extends RestHandler> handler : plugin.getRestHandlers()) {
+ registerRestHandler(handlers, handler);
+ }
+ }
+ return handlers;
}
- public ActionModule registerFilter(Class<? extends ActionFilter> actionFilter) {
- actionFilters.add(actionFilter);
- return this;
+ private static void registerRestHandler(Set<Class<? extends RestHandler>> handlers, Class<? extends RestHandler> handler) {
+ if (handlers.contains(handler)) {
+ throw new IllegalArgumentException("can't register the same [rest_handler] more than once for [" + handler.getName() + "]");
+ }
+ handlers.add(handler);
}
@Override
protected void configure() {
- if (proxy == false) {
- if (ingestEnabled) {
- registerFilter(IngestActionFilter.class);
- } else {
- registerFilter(IngestProxyActionFilter.class);
- }
- }
-
Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
for (Class<? extends ActionFilter> actionFilter : actionFilters) {
actionFilterMultibinder.addBinding().to(actionFilter);
}
bind(ActionFilters.class).asEagerSingleton();
- bind(AutoCreateIndex.class).asEagerSingleton();
- bind(DestructiveOperations.class).asEagerSingleton();
- registerAction(MainAction.INSTANCE, TransportMainAction.class);
- registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class);
- registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class);
- registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
- registerAction(ListTasksAction.INSTANCE, TransportListTasksAction.class);
- registerAction(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);
+ bind(DestructiveOperations.class).toInstance(destructiveOperations);
- registerAction(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);
- registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
- registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
- registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
- registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);
- registerAction(ClusterRerouteAction.INSTANCE, TransportClusterRerouteAction.class);
- registerAction(ClusterSearchShardsAction.INSTANCE, TransportClusterSearchShardsAction.class);
- registerAction(PendingClusterTasksAction.INSTANCE, TransportPendingClusterTasksAction.class);
- registerAction(PutRepositoryAction.INSTANCE, TransportPutRepositoryAction.class);
- registerAction(GetRepositoriesAction.INSTANCE, TransportGetRepositoriesAction.class);
- registerAction(DeleteRepositoryAction.INSTANCE, TransportDeleteRepositoryAction.class);
- registerAction(VerifyRepositoryAction.INSTANCE, TransportVerifyRepositoryAction.class);
- registerAction(GetSnapshotsAction.INSTANCE, TransportGetSnapshotsAction.class);
- registerAction(DeleteSnapshotAction.INSTANCE, TransportDeleteSnapshotAction.class);
- registerAction(CreateSnapshotAction.INSTANCE, TransportCreateSnapshotAction.class);
- registerAction(RestoreSnapshotAction.INSTANCE, TransportRestoreSnapshotAction.class);
- registerAction(SnapshotsStatusAction.INSTANCE, TransportSnapshotsStatusAction.class);
-
- registerAction(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class);
- registerAction(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class);
- registerAction(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class);
- registerAction(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class);
- registerAction(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class);
- registerAction(GetIndexAction.INSTANCE, TransportGetIndexAction.class);
- registerAction(OpenIndexAction.INSTANCE, TransportOpenIndexAction.class);
- registerAction(CloseIndexAction.INSTANCE, TransportCloseIndexAction.class);
- registerAction(IndicesExistsAction.INSTANCE, TransportIndicesExistsAction.class);
- registerAction(TypesExistsAction.INSTANCE, TransportTypesExistsAction.class);
- registerAction(GetMappingsAction.INSTANCE, TransportGetMappingsAction.class);
- registerAction(GetFieldMappingsAction.INSTANCE, TransportGetFieldMappingsAction.class, TransportGetFieldMappingsIndexAction.class);
- registerAction(PutMappingAction.INSTANCE, TransportPutMappingAction.class);
- registerAction(IndicesAliasesAction.INSTANCE, TransportIndicesAliasesAction.class);
- registerAction(UpdateSettingsAction.INSTANCE, TransportUpdateSettingsAction.class);
- registerAction(AnalyzeAction.INSTANCE, TransportAnalyzeAction.class);
- registerAction(PutIndexTemplateAction.INSTANCE, TransportPutIndexTemplateAction.class);
- registerAction(GetIndexTemplatesAction.INSTANCE, TransportGetIndexTemplatesAction.class);
- registerAction(DeleteIndexTemplateAction.INSTANCE, TransportDeleteIndexTemplateAction.class);
- registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
- registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
- registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
- registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
- registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
- registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
- registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);
- registerAction(UpgradeSettingsAction.INSTANCE, TransportUpgradeSettingsAction.class);
- registerAction(ClearIndicesCacheAction.INSTANCE, TransportClearIndicesCacheAction.class);
- registerAction(GetAliasesAction.INSTANCE, TransportGetAliasesAction.class);
- registerAction(AliasesExistAction.INSTANCE, TransportAliasesExistAction.class);
- registerAction(GetSettingsAction.INSTANCE, TransportGetSettingsAction.class);
-
- registerAction(IndexAction.INSTANCE, TransportIndexAction.class);
- registerAction(GetAction.INSTANCE, TransportGetAction.class);
- registerAction(TermVectorsAction.INSTANCE, TransportTermVectorsAction.class);
- registerAction(MultiTermVectorsAction.INSTANCE, TransportMultiTermVectorsAction.class,
- TransportShardMultiTermsVectorAction.class);
- registerAction(DeleteAction.INSTANCE, TransportDeleteAction.class);
- registerAction(UpdateAction.INSTANCE, TransportUpdateAction.class);
- registerAction(MultiGetAction.INSTANCE, TransportMultiGetAction.class,
- TransportShardMultiGetAction.class);
- registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
- TransportShardBulkAction.class);
- registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
- registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
- registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
- registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
- registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
- registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
- registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
- registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);
- registerAction(RenderSearchTemplateAction.INSTANCE, TransportRenderSearchTemplateAction.class);
-
- //Indexed scripts
- registerAction(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
- registerAction(GetStoredScriptAction.INSTANCE, TransportGetStoredScriptAction.class);
- registerAction(DeleteStoredScriptAction.INSTANCE, TransportDeleteStoredScriptAction.class);
-
- registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class);
-
- registerAction(PutPipelineAction.INSTANCE, PutPipelineTransportAction.class);
- registerAction(GetPipelineAction.INSTANCE, GetPipelineTransportAction.class);
- registerAction(DeletePipelineAction.INSTANCE, DeletePipelineTransportAction.class);
- registerAction(SimulatePipelineAction.INSTANCE, SimulatePipelineTransportAction.class);
-
- // register Name -> GenericAction Map that can be injected to instances.
- MapBinder<String, GenericAction> actionsBinder
- = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
-
- for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
- actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().action);
- }
- // register GenericAction -> transportAction Map that can be injected to instances.
- // also register any supporting classes
- if (!proxy) {
+ if (false == transportClient) {
+ // Supporting classes only used when not a transport client
+ bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
bind(TransportLivenessAction.class).asEagerSingleton();
+
+ // register GenericAction -> transportAction Map used by NodeClient
+ @SuppressWarnings("rawtypes")
MapBinder transportActionsBinder
= MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
- for (Map.Entry<String, ActionEntry> entry : actions.entrySet()) {
+ for (ActionHandler<?, ?> action : actions.values()) {
// bind the action as eager singleton, so the map binder one will reuse it
- bind(entry.getValue().transportAction).asEagerSingleton();
- transportActionsBinder.addBinding(entry.getValue().action).to(entry.getValue().transportAction).asEagerSingleton();
- for (Class supportAction : entry.getValue().supportTransportActions) {
+ bind(action.getTransportAction()).asEagerSingleton();
+ transportActionsBinder.addBinding(action.getAction()).to(action.getTransportAction()).asEagerSingleton();
+ for (Class<?> supportAction : action.getSupportTransportActions()) {
bind(supportAction).asEagerSingleton();
}
}
+
+ // Bind the RestController which is required (by Node) even if rest isn't enabled.
+ bind(RestController.class).toInstance(restController);
+
+ // Setup the RestHandlers
+ if (NetworkModule.HTTP_ENABLED.get(settings)) {
+ Multibinder<RestHandler> restHandlers = Multibinder.newSetBinder(binder(), RestHandler.class);
+ Multibinder<AbstractCatAction> catHandlers = Multibinder.newSetBinder(binder(), AbstractCatAction.class);
+ for (Class<? extends RestHandler> handler : setupRestHandlers(actionPlugins)) {
+ bind(handler).asEagerSingleton();
+ if (AbstractCatAction.class.isAssignableFrom(handler)) {
+ catHandlers.addBinding().to(handler.asSubclass(AbstractCatAction.class));
+ } else {
+ restHandlers.addBinding().to(handler);
+ }
+ }
+ }
}
}
}
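The plugin hook above is the interesting part of this rewrite: setupActions and setupRestHandlers fold in whatever each ActionPlugin contributes, so a plugin registers new endpoints without touching ActionModule. A minimal sketch of such a plugin follows; MyAction, TransportMyAction, and RestMyAction are hypothetical classes, and the exact generic bounds on getActions() are an assumption:

```java
import static java.util.Collections.singletonList;

import java.util.List;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;

public class MyPlugin extends Plugin implements ActionPlugin {
    @Override
    public List<ActionHandler<? extends ActionRequest<?>, ? extends ActionResponse>> getActions() {
        // Picked up by ActionModule.setupActions(...) and registered under the
        // action's name; a duplicate name trips NamedRegistry's uniqueness check.
        // MyAction and TransportMyAction are hypothetical.
        return singletonList(new ActionHandler<>(MyAction.INSTANCE, TransportMyAction.class));
    }

    @Override
    public List<Class<? extends RestHandler>> getRestHandlers() {
        // Picked up by ActionModule.setupRestHandlers(...); registering the same
        // handler class twice throws IllegalArgumentException. RestMyAction is hypothetical.
        return singletonList(RestMyAction.class);
    }
}
```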
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRequest.java b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
index 7955855bc0d..bc052895a6f 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRequest.java
@@ -39,6 +39,13 @@ public abstract class ActionRequest<Request extends ActionRequest<Request>> exte
public abstract ActionRequestValidationException validate();
+ /**
+ * Should this task persist its result after it has finished?
+ */
+ public boolean getShouldPersistResult() {
+ return false;
+ }
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
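getShouldPersistResult() defaults to false, so persisting a task result is opt-in per request type. A sketch of a request that opts in, using only the hook shown above; PersistedResultRequest is hypothetical:

```java
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;

// Hypothetical request whose task result should be kept after the task finishes.
public class PersistedResultRequest extends ActionRequest<PersistedResultRequest> {
    @Override
    public ActionRequestValidationException validate() {
        return null; // nothing to validate in this sketch
    }

    @Override
    public boolean getShouldPersistResult() {
        return true; // ask for the task result to be persisted
    }
}
```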
diff --git a/core/src/main/java/org/elasticsearch/action/ActionRunnable.java b/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
index 36c3f4f17fa..78e2249d6f4 100644
--- a/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
+++ b/core/src/main/java/org/elasticsearch/action/ActionRunnable.java
@@ -22,11 +22,11 @@ package org.elasticsearch.action;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
/**
- * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Throwable)} in case an uncaught
+ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
* exception or error is thrown while the actual action is run.
*/
public abstract class ActionRunnable<Response> extends AbstractRunnable {
-
+
protected final ActionListener<Response> listener;
public ActionRunnable(ActionListener<Response> listener) {
@@ -34,11 +34,11 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {
}
/**
- * Calls the action listeners {@link ActionListener#onFailure(Throwable)} method with the given exception.
+ * Calls the action listener's {@link ActionListener#onFailure(Exception)} method with the given exception.
* This method is invoked for all exceptions thrown by {@link #doRun()}
*/
@Override
- public void onFailure(Throwable t) {
- listener.onFailure(t);
+ public void onFailure(Exception e) {
+ listener.onFailure(e);
}
}
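Typical usage subclasses ActionRunnable so that exceptions thrown from doRun() reach the listener instead of escaping the executor; a minimal sketch:

```java
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;

public class ActionRunnableExample {
    static void compute(ActionListener<String> listener) {
        new ActionRunnable<String>(listener) {
            @Override
            protected void doRun() throws Exception {
                // Anything thrown here is caught by AbstractRunnable.run()
                // and forwarded to listener.onFailure(Exception) via the
                // override shown in the diff above.
                listener.onResponse("done");
            }
        }.run();
    }
}
```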
diff --git a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
index 4df43b75401..0925c744144 100644
--- a/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/DocWriteResponse.java
@@ -18,10 +18,15 @@
*/
package org.elasticsearch.action;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.action.support.WriteResponse;
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
+import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
@@ -30,12 +35,13 @@ import java.io.IOException;
/**
* A base class for the response of a write operation that involves a single doc
*/
-public abstract class DocWriteResponse extends ReplicationResponse implements StatusToXContent {
+public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {
private ShardId shardId;
private String id;
private String type;
private long version;
+ private boolean forcedRefresh;
public DocWriteResponse(ShardId shardId, String type, String id, long version) {
this.shardId = shardId;
@@ -84,6 +90,20 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
return this.version;
}
+ /**
+ * Did this request force a refresh? Requests that set {@link WriteRequest#setRefreshPolicy(RefreshPolicy)} to
+ * {@link RefreshPolicy#IMMEDIATE} will always return true for this. Requests that set it to {@link RefreshPolicy#WAIT_UNTIL} will
+ * only return true here if they run out of refresh listener slots (see {@link IndexSettings#MAX_REFRESH_LISTENERS_PER_SHARD}).
+ */
+ public boolean forcedRefresh() {
+ return forcedRefresh;
+ }
+
+ @Override
+ public void setForcedRefresh(boolean forcedRefresh) {
+ this.forcedRefresh = forcedRefresh;
+ }
+
/** returns the rest status for this response (based on {@link ShardInfo#status()}) */
public RestStatus status() {
return getShardInfo().status();
@@ -97,6 +117,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
type = in.readString();
id = in.readString();
version = in.readZLong();
+ forcedRefresh = in.readBoolean();
}
@Override
@@ -106,6 +127,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
out.writeString(type);
out.writeString(id);
out.writeZLong(version);
+ out.writeBoolean(forcedRefresh);
}
static final class Fields {
@@ -121,7 +143,8 @@ public abstract class DocWriteResponse extends ReplicationResponse implements St
builder.field(Fields._INDEX, shardId.getIndexName())
.field(Fields._TYPE, type)
.field(Fields._ID, id)
- .field(Fields._VERSION, version);
+ .field(Fields._VERSION, version)
+ .field("forced_refresh", forcedRefresh);
shardInfo.toXContent(builder, params);
return builder;
}
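From a caller's perspective the new flag reads like this; a hedged sketch assuming a transport Client instance and the setRefreshPolicy builder method that arrives with RefreshPolicy in this change:

```java
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.client.Client;

public class ForcedRefreshExample {
    static boolean indexAndCheck(Client client) {
        IndexResponse response = client.prepareIndex("index", "type", "1")
                .setSource("field", "value")
                .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) // wait for a refresh rather than forcing one
                .get();
        // True only if the shard had to refresh immediately anyway, e.g. because
        // the per-shard refresh listener slots were exhausted.
        return response.forcedRefresh();
    }
}
```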
diff --git a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
index 4c62a7e849b..3ef699818b6 100644
--- a/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/IndicesRequest.java
@@ -40,7 +40,7 @@ public interface IndicesRequest {
*/
IndicesOptions indicesOptions();
- static interface Replaceable extends IndicesRequest {
+ interface Replaceable extends IndicesRequest {
/**
* Sets the indices that the action relates to.
*/
diff --git a/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java b/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
index fb0fd81a7be..e5e0af93072 100644
--- a/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
+++ b/core/src/main/java/org/elasticsearch/action/LatchedActionListener.java
@@ -45,7 +45,7 @@ public class LatchedActionListener<T> implements ActionListener<T> {
}
@Override
- public void onFailure(Throwable e) {
+ public void onFailure(Exception e) {
try {
delegate.onFailure(e);
} finally {
diff --git a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
index 67436f31772..6704f610ec0 100644
--- a/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
+++ b/core/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
@@ -43,15 +43,15 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
private final long taskId;
- private final Throwable reason;
+ private final Exception reason;
private final RestStatus status;
- public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
+ public TaskOperationFailure(String nodeId, long taskId, Exception e) {
this.nodeId = nodeId;
this.taskId = taskId;
- this.reason = t;
- status = ExceptionsHelper.status(t);
+ this.reason = e;
+ status = ExceptionsHelper.status(e);
}
/**
@@ -60,7 +60,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
public TaskOperationFailure(StreamInput in) throws IOException {
nodeId = in.readString();
taskId = in.readLong();
- reason = in.readThrowable();
+ reason = in.readException();
status = RestStatus.readFrom(in);
}
@@ -68,7 +68,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeLong(taskId);
- out.writeThrowable(reason);
+ out.writeException(reason);
RestStatus.writeTo(out, status);
}
diff --git a/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
index b12af818c2e..7d57e5bd60a 100644
--- a/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
+++ b/core/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java
@@ -35,7 +35,6 @@ public class TransportActionNodeProxy<Request extends ActionRequest, Response e
private final GenericAction<Request, Response> action;
private final TransportRequestOptions transportOptions;
- @Inject
public TransportActionNodeProxy(Settings settings, GenericAction<Request, Response> action, TransportService transportService) {
super(settings);
this.action = action;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
index dbabe681c7a..e007929faf2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java
@@ -42,17 +42,22 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
private final ShardId shard;
private final boolean primary;
+ private final boolean hasPendingAsyncFetch;
private final String assignedNodeId;
private final UnassignedInfo unassignedInfo;
+ private final long allocationDelayMillis;
private final long remainingDelayMillis;
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
- public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long remainingDelayMillis,
- @Nullable UnassignedInfo unassignedInfo, Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
+ public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
+ long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
+ Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
this.shard = shard;
this.primary = primary;
+ this.hasPendingAsyncFetch = hasPendingAsyncFetch;
this.assignedNodeId = assignedNodeId;
this.unassignedInfo = unassignedInfo;
+ this.allocationDelayMillis = allocationDelayMillis;
this.remainingDelayMillis = remainingDelayMillis;
this.nodeExplanations = nodeExplanations;
}
@@ -60,8 +65,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
public ClusterAllocationExplanation(StreamInput in) throws IOException {
this.shard = ShardId.readShardId(in);
this.primary = in.readBoolean();
+ this.hasPendingAsyncFetch = in.readBoolean();
this.assignedNodeId = in.readOptionalString();
this.unassignedInfo = in.readOptionalWriteable(UnassignedInfo::new);
+ this.allocationDelayMillis = in.readVLong();
this.remainingDelayMillis = in.readVLong();
int mapSize = in.readVInt();
@@ -77,8 +84,10 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
public void writeTo(StreamOutput out) throws IOException {
this.getShard().writeTo(out);
out.writeBoolean(this.isPrimary());
+ out.writeBoolean(this.isStillFetchingShardData());
out.writeOptionalString(this.getAssignedNodeId());
out.writeOptionalWriteable(this.getUnassignedInfo());
+ out.writeVLong(allocationDelayMillis);
out.writeVLong(remainingDelayMillis);
out.writeVInt(this.nodeExplanations.size());
@@ -97,6 +106,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.primary;
}
+ /** Return true if shard data is still being fetched for the allocation */
+ public boolean isStillFetchingShardData() {
+ return this.hasPendingAsyncFetch;
+ }
+
/** Return true if the shard is assigned to a node */
public boolean isAssigned() {
return this.assignedNodeId != null;
@@ -114,7 +128,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.unassignedInfo;
}
- /** Return the remaining allocation delay for this shard in millisocends */
+ /** Return the configured delay before the shard can be allocated in milliseconds */
+ public long getAllocationDelayMillis() {
+ return this.allocationDelayMillis;
+ }
+
+ /** Return the remaining allocation delay for this shard in milliseconds */
public long getRemainingDelayMillis() {
return this.remainingDelayMillis;
}
@@ -138,11 +157,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
if (assignedNodeId != null) {
builder.field("assigned_node_id", this.assignedNodeId);
}
+ builder.field("shard_state_fetch_pending", this.hasPendingAsyncFetch);
// If we have unassigned info, show that
if (unassignedInfo != null) {
unassignedInfo.toXContent(builder, params);
- long delay = unassignedInfo.getLastComputedLeftDelayNanos();
- builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueNanos(delay));
+ builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
}
builder.startObject("nodes");
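The two hunks above keep construction from a StreamInput and writeTo symmetric: hasPendingAsyncFetch and allocationDelayMillis are written and read at matching positions, which is what keeps the wire format intact. A sketch of the round trip this guarantees; the streamInput() helper name is an assumption for this era of the codebase:

```java
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class ExplanationWireExample {
    static ClusterAllocationExplanation roundTrip(ClusterAllocationExplanation original) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out); // writes hasPendingAsyncFetch and allocationDelayMillis in field order
        StreamInput in = out.bytes().streamInput(); // assumed helper; exact name varies by version
        ClusterAllocationExplanation copy = new ClusterAllocationExplanation(in);
        // Reads happen in the same order, so the new fields survive the round trip.
        assert copy.isStillFetchingShardData() == original.isStillFetchingShardData();
        assert copy.getAllocationDelayMillis() == original.getAllocationDelayMillis();
        return copy;
    }
}
```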
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
index 8f467402bc4..e564711d418 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/NodeExplanation.java
@@ -43,7 +43,7 @@ public class NodeExplanation implements Writeable, ToXContent {
private final String finalExplanation;
public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
- final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
+ @Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
final ClusterAllocationExplanation.FinalDecision finalDecision,
final String finalExplanation,
final ClusterAllocationExplanation.StoreCopy storeCopy) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
index 08e899be4df..d63a7ff8968 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.cluster.allocation;
-import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.index.CorruptIndexException;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
@@ -30,24 +29,18 @@ import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStores
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfoService;
-import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.MetaData.Custom;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
-import org.elasticsearch.cluster.routing.RoutingNodes.RoutingNodesIterator;
-import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
-import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@@ -56,15 +49,17 @@ import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.HashMap;
-import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
+
/**
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
* master node in the cluster.
@@ -72,26 +67,26 @@ import java.util.Set;
public class TransportClusterAllocationExplainAction
extends TransportMasterNodeAction<ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> {
- private final AllocationService allocationService;
private final ClusterInfoService clusterInfoService;
private final AllocationDeciders allocationDeciders;
private final ShardsAllocator shardAllocator;
private final TransportIndicesShardStoresAction shardStoresAction;
+ private final GatewayAllocator gatewayAllocator;
@Inject
public TransportClusterAllocationExplainAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
- AllocationService allocationService, ClusterInfoService clusterInfoService,
- AllocationDeciders allocationDeciders, ShardsAllocator shardAllocator,
- TransportIndicesShardStoresAction shardStoresAction) {
+ ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
+ ShardsAllocator shardAllocator, TransportIndicesShardStoresAction shardStoresAction,
+ GatewayAllocator gatewayAllocator) {
super(settings, ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
indexNameExpressionResolver, ClusterAllocationExplainRequest::new);
- this.allocationService = allocationService;
this.clusterInfoService = clusterInfoService;
this.allocationDeciders = allocationDeciders;
this.shardAllocator = shardAllocator;
this.shardStoresAction = shardStoresAction;
+ this.gatewayAllocator = gatewayAllocator;
}
@Override
@@ -140,7 +135,8 @@ public class TransportClusterAllocationExplainAction
Float nodeWeight,
IndicesShardStoresResponse.StoreStatus storeStatus,
String assignedNodeId,
- Set<String> activeAllocationIds) {
+ Set<String> activeAllocationIds,
+ boolean hasPendingAsyncFetch) {
final ClusterAllocationExplanation.FinalDecision finalDecision;
final ClusterAllocationExplanation.StoreCopy storeCopy;
final String finalExplanation;
@@ -149,7 +145,7 @@ public class TransportClusterAllocationExplainAction
// No copies of the data
storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
} else {
- final Throwable storeErr = storeStatus.getStoreException();
+ final Exception storeErr = storeStatus.getStoreException();
if (storeErr != null) {
if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
@@ -171,6 +167,19 @@ public class TransportClusterAllocationExplainAction
if (node.getId().equals(assignedNodeId)) {
finalDecision = ClusterAllocationExplanation.FinalDecision.ALREADY_ASSIGNED;
finalExplanation = "the shard is already assigned to this node";
+ } else if (hasPendingAsyncFetch &&
+ shard.primary() == false &&
+ shard.unassigned() &&
+ shard.allocatedPostIndexCreate(indexMetaData) &&
+ nodeDecision.type() != Decision.Type.YES) {
+ finalExplanation = "the shard cannot be assigned because allocation deciders return a " + nodeDecision.type().name() +
+ " decision and the shard's state is still being fetched";
+ finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
+ } else if (hasPendingAsyncFetch &&
+ shard.unassigned() &&
+ shard.allocatedPostIndexCreate(indexMetaData)) {
+ finalExplanation = "the shard's state is still being fetched so it cannot be allocated";
+ finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
} else if (shard.primary() && shard.unassigned() && shard.allocatedPostIndexCreate(indexMetaData) &&
storeCopy == ClusterAllocationExplanation.StoreCopy.STALE) {
finalExplanation = "the copy of the shard is stale, allocation ids do not match";
@@ -190,6 +199,7 @@ public class TransportClusterAllocationExplainAction
finalDecision = ClusterAllocationExplanation.FinalDecision.NO;
finalExplanation = "the shard cannot be assigned because one or more allocation decider returns a 'NO' decision";
} else {
+ // TODO: handle throttling decision better here
finalDecision = ClusterAllocationExplanation.FinalDecision.YES;
if (storeCopy == ClusterAllocationExplanation.StoreCopy.AVAILABLE) {
finalExplanation = "the shard can be assigned and the node contains a valid copy of the shard data";
@@ -208,16 +218,15 @@ public class TransportClusterAllocationExplainAction
*/
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
boolean includeYesDecisions, ShardsAllocator shardAllocator,
- List<IndicesShardStoresResponse.StoreStatus> shardStores) {
+ List<IndicesShardStoresResponse.StoreStatus> shardStores,
+ GatewayAllocator gatewayAllocator) {
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// get the existing unassigned info if available
UnassignedInfo ui = shard.unassignedInfo();
- RoutingNodesIterator iter = routingNodes.nodes();
Map<DiscoveryNode, Decision> nodeToDecision = new HashMap<>();
- while (iter.hasNext()) {
- RoutingNode node = iter.next();
+ for (RoutingNode node : routingNodes) {
DiscoveryNode discoNode = node.node();
if (discoNode.isDataNode()) {
Decision d = tryShardOnNode(shard, node, allocation, includeYesDecisions);
@@ -227,9 +236,9 @@ public class TransportClusterAllocationExplainAction
long remainingDelayMillis = 0;
final MetaData metadata = allocation.metaData();
final IndexMetaData indexMetaData = metadata.index(shard.index());
- if (ui != null) {
- final Settings indexSettings = indexMetaData.getSettings();
- long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), metadata.settings(), indexSettings);
+ long allocationDelayMillis = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).getMillis();
+ if (ui != null && ui.isDelayed()) {
+ long remainingDelayNanos = ui.getRemainingDelay(System.nanoTime(), indexMetaData.getSettings());
remainingDelayMillis = TimeValue.timeValueNanos(remainingDelayNanos).millis();
}
@@ -248,19 +257,21 @@ public class TransportClusterAllocationExplainAction
Float weight = weights.get(node);
IndicesShardStoresResponse.StoreStatus storeStatus = nodeToStatus.get(node);
NodeExplanation nodeExplanation = calculateNodeExplanation(shard, indexMetaData, node, decision, weight,
- storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()));
+ storeStatus, shard.currentNodeId(), indexMetaData.activeAllocationIds(shard.getId()),
+ allocation.hasPendingAsyncFetch());
explanations.put(node, nodeExplanation);
}
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
- shard.currentNodeId(), remainingDelayMillis, ui, explanations);
+ shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
+ gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
}
@Override
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
- final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state.nodes(),
- clusterInfoService.getClusterInfo(), System.nanoTime());
+ final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
+ clusterInfoService.getClusterInfo(), System.nanoTime(), false);
ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) {
@@ -307,12 +318,12 @@ public class TransportClusterAllocationExplainAction
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
- request.includeYesDecisions(), shardAllocator, shardStoreStatus);
+ request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
listener.onResponse(new ClusterAllocationExplainResponse(cae));
}
@Override
- public void onFailure(Throwable e) {
+ public void onFailure(Exception e) {
listener.onFailure(e);
}
});
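The hunk above replaces the removed getLastComputedLeftDelayNanos() with an explicit pair of values: the configured allocation delay, read from INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING, and the remaining delay, computed only when the shard's unassignment is actually delayed. A minimal, self-contained sketch of that arithmetic, using plain JDK time utilities instead of the real TimeValue/UnassignedInfo types (all names below are illustrative, not Elasticsearch classes):

    import java.util.concurrent.TimeUnit;

    // Stand-in for the allocation-delay math in explainShard: the configured
    // delay comes from index settings, the remaining delay from how long the
    // shard has already been unassigned.
    public class DelayMathSketch {
        public static void main(String[] args) {
            long configuredDelayMillis = TimeUnit.MINUTES.toMillis(1);   // index.unassigned.node_left.delayed_timeout
            long unassignedSinceNanos = System.nanoTime() - TimeUnit.SECONDS.toNanos(20);

            long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - unassignedSinceNanos);
            long remainingDelayMillis = Math.max(0, configuredDelayMillis - elapsedMillis);

            System.out.println("allocation_delay_in_millis: " + configuredDelayMillis);
            System.out.println("remaining_delay_in_millis: " + remainingDelayMillis);
        }
    }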
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
index 59b426d8c31..27970f332fc 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequest.java
@@ -33,8 +33,6 @@ import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
-import static org.elasticsearch.common.unit.TimeValue.readTimeValue;
-
/**
*
*/
@@ -160,7 +158,7 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest> {
- private final ClusterName clusterName;
private final GatewayAllocator gatewayAllocator;
@Inject
public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService,
- ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters,
+ ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) {
- super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterHealthRequest::new);
- this.clusterName = clusterName;
+ super(settings, ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters,
+ indexNameExpressionResolver, ClusterHealthRequest::new);
this.gatewayAllocator = gatewayAllocator;
}
@@ -106,9 +104,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<ClusterHealthRequest, ClusterHealthResponse> {
}
@Override
- public void onFailure(String source, Throwable t) {
- logger.error("unexpected failure during [{}]", t, source);
- listener.onFailure(t);
+ public void onFailure(String source, Exception e) {
+ logger.error("unexpected failure during [{}]", e, source);
+ listener.onFailure(e);
}
@Override
@@ -283,14 +281,14 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<ClusterHealthRequest, ClusterHealthResponse> {
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
} catch (IndexNotFoundException e) {
// one of the specified indices is not there - treat it as RED.
- ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,
+ ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState,
numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState),
pendingTaskTimeInQueue);
response.setStatus(ClusterHealthStatus.RED);
return response;
}
- return new ClusterHealthResponse(clusterName.value(), concreteIndices, clusterState, numberOfPendingTasks,
+ return new ClusterHealthResponse(clusterState.getClusterName().value(), concreteIndices, clusterState, numberOfPendingTasks,
numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), pendingTaskTimeInQueue);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java
index 7c8f797fdcb..e3df7f57312 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java
@@ -101,7 +101,7 @@ public class NodesHotThreadsRequest extends BaseNodesRequest<NodesHotThreadsRequest> {
NodesHotThreadsResponse() {
}
- public NodesHotThreadsResponse(ClusterName clusterName, NodeHotThreads[] nodes) {
- super(clusterName, nodes);
+ public NodesHotThreadsResponse(ClusterName clusterName, List<NodeHotThreads> nodes, List<FailedNodeException> failures) {
+ super(clusterName, nodes, failures);
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- super.readFrom(in);
- nodes = new NodeHotThreads[in.readVInt()];
- for (int i = 0; i < nodes.length; i++) {
- nodes[i] = NodeHotThreads.readNodeHotThreads(in);
- }
+ protected List<NodeHotThreads> readNodesFrom(StreamInput in) throws IOException {
+ return in.readList(NodeHotThreads::readNodeHotThreads);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- out.writeVInt(nodes.length);
- for (NodeHotThreads node : nodes) {
- node.writeTo(out);
- }
+ protected void writeNodesTo(StreamOutput out, List<NodeHotThreads> nodes) throws IOException {
+ out.writeStreamableList(nodes);
}
}
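The readNodesFrom/writeNodesTo overrides above are the new per-node serialization hooks: the base response class now owns the envelope (cluster name, failures) and subclasses only (de)serialize their node-level payload as a length-prefixed list. A self-contained sketch of that wire pattern, with JDK DataInput/DataOutput standing in for Elasticsearch's StreamInput/StreamOutput and a made-up payload type:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    // Each element knows how to (de)serialize itself; the list is written as
    // a size followed by the elements, mirroring readList/writeStreamableList.
    class NodeHotThreadsSketch {
        final String nodeId;
        final String threadDump;

        NodeHotThreadsSketch(String nodeId, String threadDump) {
            this.nodeId = nodeId;
            this.threadDump = threadDump;
        }

        static NodeHotThreadsSketch readFrom(DataInput in) throws IOException {
            return new NodeHotThreadsSketch(in.readUTF(), in.readUTF());
        }

        void writeTo(DataOutput out) throws IOException {
            out.writeUTF(nodeId);
            out.writeUTF(threadDump);
        }

        static List<NodeHotThreadsSketch> readNodesFrom(DataInput in) throws IOException {
            int size = in.readInt();
            List<NodeHotThreadsSketch> nodes = new ArrayList<>(size);
            for (int i = 0; i < size; i++) {
                nodes.add(readFrom(in));
            }
            return nodes;
        }

        static void writeNodesTo(DataOutput out, List<NodeHotThreadsSketch> nodes) throws IOException {
            out.writeInt(nodes.size());
            for (NodeHotThreadsSketch node : nodes) {
                node.writeTo(out);
            }
        }
    }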
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
index d53f651da45..73403f40318 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.node.hotthreads;
import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
@@ -35,33 +36,28 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*
*/
-public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHotThreadsRequest, NodesHotThreadsResponse, TransportNodesHotThreadsAction.NodeRequest, NodeHotThreads> {
+public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHotThreadsRequest, NodesHotThreadsResponse,
+ TransportNodesHotThreadsAction.NodeRequest, NodeHotThreads> {
@Inject
- public TransportNodesHotThreadsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ public TransportNodesHotThreadsAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
- super(settings, NodesHotThreadsAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
- indexNameExpressionResolver, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC);
+ super(settings, NodesHotThreadsAction.NAME, threadPool, clusterService, transportService, actionFilters,
+ indexNameExpressionResolver, NodesHotThreadsRequest::new, NodeRequest::new, ThreadPool.Names.GENERIC, NodeHotThreads.class);
}
@Override
- protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, AtomicReferenceArray responses) {
- final List<NodeHotThreads> nodes = new ArrayList<>();
- for (int i = 0; i < responses.length(); i++) {
- Object resp = responses.get(i);
- if (resp instanceof NodeHotThreads) {
- nodes.add((NodeHotThreads) resp);
- }
- }
- return new NodesHotThreadsResponse(clusterName, nodes.toArray(new NodeHotThreads[nodes.size()]));
+ protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request,
+ List<NodeHotThreads> responses, List<FailedNodeException> failures) {
+ return new NodesHotThreadsResponse(clusterService.getClusterName(), responses, failures);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
index 87ec2d052ab..d7ce899792f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java
@@ -27,8 +27,9 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.http.HttpInfo;
-import org.elasticsearch.ingest.core.IngestInfo;
+import org.elasticsearch.ingest.IngestInfo;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import org.elasticsearch.monitor.process.ProcessInfo;
@@ -45,8 +46,6 @@ import static java.util.Collections.unmodifiableMap;
* Node information (static, does not change over time).
*/
public class NodeInfo extends BaseNodeResponse {
- @Nullable
- private Map<String, String> serviceAttributes;
private Version version;
private Build build;
@@ -78,16 +77,19 @@ public class NodeInfo extends BaseNodeResponse {
@Nullable
private IngestInfo ingest;
+ @Nullable
+ private ByteSizeValue totalIndexingBuffer;
+
public NodeInfo() {
}
- public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
+ public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
- @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest) {
+ @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
+ @Nullable ByteSizeValue totalIndexingBuffer) {
super(node);
this.version = version;
this.build = build;
- this.serviceAttributes = serviceAttributes;
this.settings = settings;
this.os = os;
this.process = process;
@@ -97,6 +99,7 @@ public class NodeInfo extends BaseNodeResponse {
this.http = http;
this.plugins = plugins;
this.ingest = ingest;
+ this.totalIndexingBuffer = totalIndexingBuffer;
}
/**
@@ -121,14 +124,6 @@ public class NodeInfo extends BaseNodeResponse {
return this.build;
}
- /**
- * The service attributes of the node.
- */
- @Nullable
- public Map<String, String> getServiceAttributes() {
- return this.serviceAttributes;
- }
-
/**
* The settings of the node.
*/
@@ -186,6 +181,11 @@ public class NodeInfo extends BaseNodeResponse {
return ingest;
}
+ @Nullable
+ public ByteSizeValue getTotalIndexingBuffer() {
+ return totalIndexingBuffer;
+ }
+
public static NodeInfo readNodeInfo(StreamInput in) throws IOException {
NodeInfo nodeInfo = new NodeInfo();
nodeInfo.readFrom(in);
@@ -198,12 +198,9 @@ public class NodeInfo extends BaseNodeResponse {
version = Version.readVersion(in);
build = Build.readBuild(in);
if (in.readBoolean()) {
- Map<String, String> builder = new HashMap<>();
- int size = in.readVInt();
- for (int i = 0; i < size; i++) {
- builder.put(in.readString(), in.readString());
- }
- serviceAttributes = unmodifiableMap(builder);
+ totalIndexingBuffer = new ByteSizeValue(in.readLong());
+ } else {
+ totalIndexingBuffer = null;
}
if (in.readBoolean()) {
settings = Settings.readSettingsFromStream(in);
@@ -240,15 +237,11 @@ public class NodeInfo extends BaseNodeResponse {
super.writeTo(out);
out.writeVInt(version.id);
Build.writeBuild(build, out);
- if (getServiceAttributes() == null) {
+ if (totalIndexingBuffer == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
- out.writeVInt(serviceAttributes.size());
- for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
- out.writeString(entry.getKey());
- out.writeString(entry.getValue());
- }
+ out.writeLong(totalIndexingBuffer.bytes());
}
if (settings == null) {
out.writeBoolean(false);
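The totalIndexingBuffer field that replaces serviceAttributes is put on the wire with the usual optional-value convention: a presence boolean followed by the raw value. A tiny sketch of that convention with plain JDK streams (OptionalLongIo is an illustrative name, not an Elasticsearch class):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    // writeBoolean(false) marks "absent"; writeBoolean(true) is followed by
    // the value itself, exactly like the totalIndexingBuffer handling above.
    final class OptionalLongIo {
        static void write(DataOutput out, Long value) throws IOException {
            if (value == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeLong(value);
            }
        }

        static Long read(DataInput in) throws IOException {
            return in.readBoolean() ? in.readLong() : null;
        }
    }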
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
index 66c5cfd65d4..b547d1d7432 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java
@@ -39,6 +39,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
private boolean http = true;
private boolean plugins = true;
private boolean ingest = true;
+ private boolean indices = true;
public NodesInfoRequest() {
}
@@ -64,6 +65,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
http = false;
plugins = false;
ingest = false;
+ indices = false;
return this;
}
@@ -80,6 +82,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
http = true;
plugins = true;
ingest = true;
+ indices = true;
return this;
}
@@ -221,6 +224,22 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
return ingest;
}
+ /**
+ * Should information about indices (currently just indexing buffers) be returned?
+ * @param indices true if you want the indices info returned
+ */
+ public NodesInfoRequest indices(boolean indices) {
+ this.indices = indices;
+ return this;
+ }
+
+ /**
+ * @return true if information about indices (currently just indexing buffers) should be returned
+ */
+ public boolean indices() {
+ return indices;
+ }
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@@ -233,6 +252,7 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
http = in.readBoolean();
plugins = in.readBoolean();
ingest = in.readBoolean();
+ indices = in.readBoolean();
}
@Override
@@ -247,5 +267,6 @@ public class NodesInfoRequest extends BaseNodesRequest<NodesInfoRequest> {
out.writeBoolean(http);
out.writeBoolean(plugins);
out.writeBoolean(ingest);
+ out.writeBoolean(indices);
}
}
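A hypothetical usage sketch of the new flag: clear() switches every section off, after which individual sections can be re-enabled, so the snippet below requests only the indices section (the indexing buffer size) from the target nodes:

    // Only the new indices section is returned; os, process, jvm, etc. stay off.
    NodesInfoRequest request = new NodesInfoRequest()
            .clear()
            .indices(true);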
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
index fc484012379..16befb79aab 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequestBuilder.java
@@ -118,4 +118,12 @@ public class NodesInfoRequestBuilder extends NodesOperationRequestBuilder<NodesInfoRequest, NodesInfoResponse, NodesInfoRequestBuilder> {
public NodesInfoResponse() {
}
- public NodesInfoResponse(ClusterName clusterName, NodeInfo[] nodes) {
- super(clusterName, nodes);
+ public NodesInfoResponse(ClusterName clusterName, List<NodeInfo> nodes, List<FailedNodeException> failures) {
+ super(clusterName, nodes, failures);
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- super.readFrom(in);
- nodes = new NodeInfo[in.readVInt()];
- for (int i = 0; i < nodes.length; i++) {
- nodes[i] = NodeInfo.readNodeInfo(in);
- }
+ protected List<NodeInfo> readNodesFrom(StreamInput in) throws IOException {
+ return in.readList(NodeInfo::readNodeInfo);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- out.writeVInt(nodes.length);
- for (NodeInfo node : nodes) {
- node.writeTo(out);
- }
+ protected void writeNodesTo(StreamOutput out, List<NodeInfo> nodes) throws IOException {
+ out.writeStreamableList(nodes);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.field("cluster_name", getClusterName().value());
-
builder.startObject("nodes");
- for (NodeInfo nodeInfo : this) {
+ for (NodeInfo nodeInfo : getNodes()) {
builder.startObject(nodeInfo.getNode().getId());
builder.field("name", nodeInfo.getNode().getName());
@@ -77,11 +69,8 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements ToXContent {
builder.field("version", nodeInfo.getVersion());
builder.field("build_hash", nodeInfo.getBuild().shortHash());
-
- if (nodeInfo.getServiceAttributes() != null) {
- for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
- builder.field(nodeAttribute.getKey(), nodeAttribute.getValue());
- }
+ if (nodeInfo.getTotalIndexingBuffer() != null) {
+ builder.byteSizeField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer());
}
builder.startArray("roles");
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
index f52729faa4f..028198cf831 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/info/TransportNodesInfoAction.java
@@ -19,10 +19,10 @@
package org.elasticsearch.action.admin.cluster.node.info;
+import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
-import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@@ -34,36 +34,32 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*
*/
-public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequest, NodesInfoResponse, TransportNodesInfoAction.NodeInfoRequest, NodeInfo> {
+public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequest, NodesInfoResponse,
+ TransportNodesInfoAction.NodeInfoRequest, NodeInfo> {
private final NodeService nodeService;
@Inject
- public TransportNodesInfoAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ public TransportNodesInfoAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
- NodeService nodeService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
- super(settings, NodesInfoAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
- indexNameExpressionResolver, NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT);
+ NodeService nodeService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
+ super(settings, NodesInfoAction.NAME, threadPool, clusterService, transportService, actionFilters,
+ indexNameExpressionResolver, NodesInfoRequest::new, NodeInfoRequest::new, ThreadPool.Names.MANAGEMENT, NodeInfo.class);
this.nodeService = nodeService;
}
@Override
- protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest, AtomicReferenceArray responses) {
- final List<NodeInfo> nodesInfos = new ArrayList<>();
- for (int i = 0; i < responses.length(); i++) {
- Object resp = responses.get(i);
- if (resp instanceof NodeInfo) {
- nodesInfos.add((NodeInfo) resp);
- }
- }
- return new NodesInfoResponse(clusterName, nodesInfos.toArray(new NodeInfo[nodesInfos.size()]));
+ protected NodesInfoResponse newResponse(NodesInfoRequest nodesInfoRequest,
+ List<NodeInfo> responses, List<FailedNodeException> failures) {
+ return new NodesInfoResponse(clusterService.getClusterName(), responses, failures);
}
@Override
@@ -80,7 +76,7 @@ public class TransportNodesInfoAction extends TransportNodesAction<NodesInfoRequest, NodesInfoResponse, TransportNodesInfoAction.NodeInfoRequest, NodeInfo> {
private final ClusterService clusterService;
- private final ClusterName clusterName;
public static final String NAME = "cluster:monitor/nodes/liveness";
@Inject
- public TransportLivenessAction(ClusterName clusterName,
- ClusterService clusterService, TransportService transportService) {
+ public TransportLivenessAction(ClusterService clusterService, TransportService transportService) {
this.clusterService = clusterService;
- this.clusterName = clusterName;
- transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME, this);
+ transportService.registerRequestHandler(NAME, LivenessRequest::new, ThreadPool.Names.SAME,
+ false, false /*can not trip circuit breaker*/, this);
}
@Override
public void messageReceived(LivenessRequest request, TransportChannel channel) throws Exception {
- channel.sendResponse(new LivenessResponse(clusterName, clusterService.localNode()));
+ channel.sendResponse(new LivenessResponse(clusterService.getClusterName(), clusterService.localNode()));
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java
index af28c1fb5d5..1a9023ab93c 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java
@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.node.stats;
+import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -28,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
+import java.util.List;
/**
*
@@ -37,34 +39,24 @@ public class NodesStatsResponse extends BaseNodesResponse<NodeStats> implements ToXContent {
NodesStatsResponse() {
}
- public NodesStatsResponse(ClusterName clusterName, NodeStats[] nodes) {
- super(clusterName, nodes);
+ public NodesStatsResponse(ClusterName clusterName, List<NodeStats> nodes, List<FailedNodeException> failures) {
+ super(clusterName, nodes, failures);
}
@Override
- public void readFrom(StreamInput in) throws IOException {
- super.readFrom(in);
- nodes = new NodeStats[in.readVInt()];
- for (int i = 0; i < nodes.length; i++) {
- nodes[i] = NodeStats.readNodeStats(in);
- }
+ protected List<NodeStats> readNodesFrom(StreamInput in) throws IOException {
+ return in.readList(NodeStats::readNodeStats);
}
@Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- out.writeVInt(nodes.length);
- for (NodeStats node : nodes) {
- node.writeTo(out);
- }
+ protected void writeNodesTo(StreamOutput out, List<NodeStats> nodes) throws IOException {
+ out.writeStreamableList(nodes);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.field("cluster_name", getClusterName().value());
-
builder.startObject("nodes");
- for (NodeStats nodeStats : this) {
+ for (NodeStats nodeStats : getNodes()) {
builder.startObject(nodeStats.getNode().getId());
builder.field("timestamp", nodeStats.getTimestamp());
nodeStats.toXContent(builder, params);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
index 8ba3d00558b..5863e54d08f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
@@ -19,10 +19,10 @@
package org.elasticsearch.action.admin.cluster.node.stats;
+import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.nodes.BaseNodeRequest;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
-import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@@ -34,36 +34,31 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
-import java.util.ArrayList;
import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*
*/
-public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRequest, NodesStatsResponse, TransportNodesStatsAction.NodeStatsRequest, NodeStats> {
+public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRequest, NodesStatsResponse,
+ TransportNodesStatsAction.NodeStatsRequest, NodeStats> {
private final NodeService nodeService;
@Inject
- public TransportNodesStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
+ public TransportNodesStatsAction(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
- NodeService nodeService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
- super(settings, NodesStatsAction.NAME, clusterName, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
- NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT);
+ NodeService nodeService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
+ super(settings, NodesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters,
+ indexNameExpressionResolver, NodesStatsRequest::new, NodeStatsRequest::new, ThreadPool.Names.MANAGEMENT, NodeStats.class);
this.nodeService = nodeService;
}
@Override
- protected NodesStatsResponse newResponse(NodesStatsRequest nodesInfoRequest, AtomicReferenceArray responses) {
- final List<NodeStats> nodeStats = new ArrayList<>();
- for (int i = 0; i < responses.length(); i++) {
- Object resp = responses.get(i);
- if (resp instanceof NodeStats) {
- nodeStats.add((NodeStats) resp);
- }
- }
- return new NodesStatsResponse(clusterName, nodeStats.toArray(new NodeStats[nodeStats.size()]));
+ protected NodesStatsResponse newResponse(NodesStatsRequest request, List<NodeStats> responses, List<FailedNodeException> failures) {
+ return new NodesStatsResponse(clusterService.getClusterName(), responses, failures);
}
@Override
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java
index 716e9027bf0..9bfeaecd78b 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/CancelTasksResponse.java
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
+import org.elasticsearch.tasks.TaskInfo;
import java.util.List;
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
index a52a0358983..6d5936db67a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/cancel/TransportCancelTasksAction.java
@@ -22,10 +22,8 @@ package org.elasticsearch.action.admin.cluster.node.tasks.cancel;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
-import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
-import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -36,6 +34,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.CancellableTask;
import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
@@ -63,10 +62,10 @@ public class TransportCancelTasksAction extends TransportTasksAction<CancellableTask, CancelTasksRequest, CancelTasksResponse, TaskInfo> {
+/**
+ * Action for retrieving a single task by its id
+ */
+public class GetTaskAction extends Action<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
- public static final RenderSearchTemplateAction INSTANCE = new RenderSearchTemplateAction();
- public static final String NAME = "cluster:admin/render/template/search";
+ public static final GetTaskAction INSTANCE = new GetTaskAction();
+ public static final String NAME = "cluster:monitor/task/get";
- public RenderSearchTemplateAction() {
+ private GetTaskAction() {
super(NAME);
}
@Override
- public RenderSearchTemplateRequestBuilder newRequestBuilder(ElasticsearchClient client) {
- return new RenderSearchTemplateRequestBuilder(client, this);
+ public GetTaskResponse newResponse() {
+ return new GetTaskResponse();
}
@Override
- public RenderSearchTemplateResponse newResponse() {
- return new RenderSearchTemplateResponse();
+ public GetTaskRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+ return new GetTaskRequestBuilder(client, this);
}
-
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java
new file mode 100644
index 00000000000..efbc9679e71
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.tasks.TaskId;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.ValidateActions.addValidationError;
+
+/**
+ * A request to get a single task by its {@link TaskId}
+ */
+public class GetTaskRequest extends ActionRequest<GetTaskRequest> {
+ private TaskId taskId = TaskId.EMPTY_TASK_ID;
+ private boolean waitForCompletion = false;
+ private TimeValue timeout = null;
+
+ /**
+ * Get the TaskId to look up.
+ */
+ public TaskId getTaskId() {
+ return taskId;
+ }
+
+ /**
+ * Set the TaskId to look up. Required.
+ */
+ public GetTaskRequest setTaskId(TaskId taskId) {
+ this.taskId = taskId;
+ return this;
+ }
+
+ /**
+ * Should this request wait for the found task to complete?
+ */
+ public boolean getWaitForCompletion() {
+ return waitForCompletion;
+ }
+
+ /**
+ * Should this request wait for the found task to complete?
+ */
+ public GetTaskRequest setWaitForCompletion(boolean waitForCompletion) {
+ this.waitForCompletion = waitForCompletion;
+ return this;
+ }
+
+ /**
+ * Timeout to wait for any async actions this request must take. The request may take anywhere from zero to two such actions.
+ */
+ public TimeValue getTimeout() {
+ return timeout;
+ }
+
+ /**
+ * Timeout to wait for any async actions this request must take. The request may take anywhere from zero to two such actions.
+ */
+ public GetTaskRequest setTimeout(TimeValue timeout) {
+ this.timeout = timeout;
+ return this;
+ }
+
+ GetTaskRequest nodeRequest(String thisNodeId, long thisTaskId) {
+ GetTaskRequest copy = new GetTaskRequest();
+ copy.setParentTask(thisNodeId, thisTaskId);
+ copy.setTaskId(taskId);
+ copy.setTimeout(timeout);
+ copy.setWaitForCompletion(waitForCompletion);
+ return copy;
+ }
+
+ @Override
+ public ActionRequestValidationException validate() {
+ ActionRequestValidationException validationException = null;
+ if (false == getTaskId().isSet()) {
+ validationException = addValidationError("task id is required", validationException);
+ }
+ return validationException;
+ }
+
+ @Override
+ public void readFrom(StreamInput in) throws IOException {
+ super.readFrom(in);
+ taskId = TaskId.readFromStream(in);
+ timeout = in.readOptionalWriteable(TimeValue::new);
+ waitForCompletion = in.readBoolean();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ taskId.writeTo(out);
+ out.writeOptionalWriteable(timeout);
+ out.writeBoolean(waitForCompletion);
+ }
+}
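A hypothetical construction of the new request (the node id "nodeA" and task id 123 are made up for illustration). Per validate() above, the only hard requirement is that a task id is set:

    GetTaskRequest request = new GetTaskRequest()
            .setTaskId(new TaskId("nodeA", 123))
            .setWaitForCompletion(true)
            .setTimeout(TimeValue.timeValueSeconds(30));

    assert request.validate() == null; // task id is set, so the request is valid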
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java
new file mode 100644
index 00000000000..e1042df2ac3
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskRequestBuilder.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.client.ElasticsearchClient;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.tasks.TaskId;
+
+/**
+ * Builder for the request to retrieve a single task by its id
+ */
+public class GetTaskRequestBuilder extends ActionRequestBuilder<GetTaskRequest, GetTaskResponse, GetTaskRequestBuilder> {
+ public GetTaskRequestBuilder(ElasticsearchClient client, GetTaskAction action) {
+ super(client, action, new GetTaskRequest());
+ }
+
+ /**
+ * Set the TaskId to look up. Required.
+ */
+ public final GetTaskRequestBuilder setTaskId(TaskId taskId) {
+ request.setTaskId(taskId);
+ return this;
+ }
+
+ /**
+ * Should this request wait for the found task to complete?
+ */
+ public final GetTaskRequestBuilder setWaitForCompletion(boolean waitForCompletion) {
+ request.setWaitForCompletion(waitForCompletion);
+ return this;
+ }
+
+ /**
+ * Timeout to wait for any async actions this request must take. The request may take anywhere from zero to two such actions.
+ */
+ public final GetTaskRequestBuilder setTimeout(TimeValue timeout) {
+ request.setTimeout(timeout);
+ return this;
+ }
+}
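Combined with the GetTaskAction singleton registered earlier, a client-side call might look like the following sketch (client is any connected ElasticsearchClient; the ids are illustrative):

    GetTaskResponse response = new GetTaskRequestBuilder(client, GetTaskAction.INSTANCE)
            .setTaskId(new TaskId("nodeA", 123))
            .setWaitForCompletion(true)
            .setTimeout(TimeValue.timeValueSeconds(30))
            .get();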
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java
similarity index 61%
rename from core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java
rename to core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java
index d14a9a4f06a..afb03a7c9dc 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/RenderSearchTemplateResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/GetTaskResponse.java
@@ -17,52 +17,59 @@
* under the License.
*/
-package org.elasticsearch.action.admin.cluster.validate.template;
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.tasks.PersistedTaskInfo;
import java.io.IOException;
-public class RenderSearchTemplateResponse extends ActionResponse implements ToXContent {
+import static java.util.Objects.requireNonNull;
- private BytesReference source;
+/**
+ * Response containing information about a single task, whether still running or persisted in the tasks index
+ */
+public class GetTaskResponse extends ActionResponse implements ToXContent {
+ private PersistedTaskInfo task;
- public BytesReference source() {
- return source;
+ public GetTaskResponse() {
}
-
- public void source(BytesReference source) {
- this.source = source;
+
+ public GetTaskResponse(PersistedTaskInfo task) {
+ this.task = requireNonNull(task, "task is required");
}
-
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- super.writeTo(out);
- boolean hasSource = source != null;
- out.writeBoolean(hasSource);
- if (hasSource) {
- out.writeBytesReference(source);
- }
- }
-
+
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- if (in.readBoolean()) {
- source = in.readBytesReference();
- }
+ task = in.readOptionalWriteable(PersistedTaskInfo::new);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeOptionalWriteable(task);
+ }
+
+ /**
+ * Get the actual result of the fetch.
+ */
+ public PersistedTaskInfo getTask() {
+ return task;
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject();
- builder.rawField("template_output", source);
- builder.endObject();
- return builder;
+ return task.innerToXContent(builder, params);
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(this);
}
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
new file mode 100644
index 00000000000..430b07866c9
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.node.tasks.get;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.get.GetRequest;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.index.IndexNotFoundException;
+import org.elasticsearch.tasks.PersistedTaskInfo;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.tasks.TaskPersistenceService;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+
+import static org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction.waitForCompletionTimeout;
+
+/**
+ * Action to get a single task. If the task isn't running then it'll try to request the status from the results index.
+ *
+ * The general flow is:
+ * <ul>
+ * <li>If this isn't being executed on the node to which the requested TaskId belongs then move to that node.</li>
+ * <li>Look up the task and return it if it exists.</li>
+ * <li>If it doesn't then look up the task from the results index.</li>
+ * </ul>
+ */
+public class TransportGetTaskAction extends HandledTransportAction<GetTaskRequest, GetTaskResponse> {
+ private final ClusterService clusterService;
+ private final TransportService transportService;
+ private final Client client;
+
+ @Inject
+ public TransportGetTaskAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, Client client) {
+ super(settings, GetTaskAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, GetTaskRequest::new);
+ this.clusterService = clusterService;
+ this.transportService = transportService;
+ this.client = client;
+ }
+
+ @Override
+ protected void doExecute(GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+ throw new UnsupportedOperationException("Task is required");
+ }
+
+ @Override
+ protected void doExecute(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+ if (clusterService.localNode().getId().equals(request.getTaskId().getNodeId())) {
+ getRunningTaskFromNode(thisTask, request, listener);
+ } else {
+ runOnNodeWithTaskIfPossible(thisTask, request, listener);
+ }
+ }
+
+ /**
+ * Executed on the coordinating node to forward execution of the remaining work to the node that matches the requested
+ * {@link TaskId#getNodeId()}. If the node isn't in the cluster then this will just proceed to
+ * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} on this node.
+ */
+ private void runOnNodeWithTaskIfPossible(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+ TransportRequestOptions.Builder builder = TransportRequestOptions.builder();
+ if (request.getTimeout() != null) {
+ builder.withTimeout(request.getTimeout());
+ }
+ builder.withCompress(false);
+ DiscoveryNode node = clusterService.state().nodes().get(request.getTaskId().getNodeId());
+ if (node == null) {
+ // Node is no longer part of the cluster! Try and look the task up from the results index.
+ getFinishedTaskFromIndex(thisTask, request, listener);
+ return;
+ }
+ GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
+ taskManager.registerChildTask(thisTask, node.getId());
+ transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
+ new TransportResponseHandler<GetTaskResponse>() {
+ @Override
+ public GetTaskResponse newInstance() {
+ return new GetTaskResponse();
+ }
+
+ @Override
+ public void handleResponse(GetTaskResponse response) {
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void handleException(TransportException exp) {
+ listener.onFailure(exp);
+ }
+
+ @Override
+ public String executor() {
+ return ThreadPool.Names.SAME;
+ }
+ });
+ }
+
+ /**
+ * Executed on the node that should be running the task to find and return the running task. Falls back to
+ * {@link #getFinishedTaskFromIndex(Task, GetTaskRequest, ActionListener)} if the task isn't still running.
+ */
+ void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+ Task runningTask = taskManager.getTask(request.getTaskId().getId());
+ if (runningTask == null) {
+ // Task isn't running, go look in the task index
+ getFinishedTaskFromIndex(thisTask, request, listener);
+ } else {
+ if (request.getWaitForCompletion()) {
+ // Shift to the generic thread pool and let it wait for the task to complete so we don't block any important threads.
+ threadPool.generic().execute(new AbstractRunnable() {
+ @Override
+ protected void doRun() throws Exception {
+ taskManager.waitForTaskCompletion(runningTask, waitForCompletionTimeout(request.getTimeout()));
+ waitedForCompletion(thisTask, request, runningTask.taskInfo(clusterService.localNode(), true), listener);
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ listener.onFailure(e);
+ }
+ });
+ } else {
+ TaskInfo info = runningTask.taskInfo(clusterService.localNode(), true);
+ listener.onResponse(new GetTaskResponse(new PersistedTaskInfo(false, info)));
+ }
+ }
+ }
+
+ /**
+ * Called after waiting for the task to complete. Attempts to load the results of the task from the tasks index. If it isn't in the
+ * index then returns a snapshot of the task taken shortly after completion.
+ */
+ void waitedForCompletion(Task thisTask, GetTaskRequest request, TaskInfo snapshotOfRunningTask,
+ ActionListener<GetTaskResponse> listener) {
+ getFinishedTaskFromIndex(thisTask, request, new ActionListener<GetTaskResponse>() {
+ @Override
+ public void onResponse(GetTaskResponse response) {
+ // We were able to load the task from the task index. Let's send that back.
+ listener.onResponse(response);
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ /*
+ * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If
+ * the error isn't a 404 then we'll just throw it back to the user.
+ */
+ if (ExceptionsHelper.unwrap(e, ResourceNotFoundException.class) != null) {
+ listener.onResponse(new GetTaskResponse(new PersistedTaskInfo(true, snapshotOfRunningTask)));
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ });
+ }
+
+ /**
+ * Send a {@link GetRequest} to the tasks index looking for a persisted copy of the completed task. It'll only be found if the
+ * task's result was persisted. Called on the node that once had the task if that node is still part of the cluster or on the
+ * coordinating node if the node is no longer part of the cluster.
+ */
+ void getFinishedTaskFromIndex(Task thisTask, GetTaskRequest request, ActionListener<GetTaskResponse> listener) {
+ GetRequest get = new GetRequest(TaskPersistenceService.TASK_INDEX, TaskPersistenceService.TASK_TYPE,
+ request.getTaskId().toString());
+ get.setParentTask(clusterService.localNode().getId(), thisTask.getId());
+ client.get(get, new ActionListener<GetResponse>() {
+ @Override
+ public void onResponse(GetResponse getResponse) {
+ try {
+ onGetFinishedTaskFromIndex(getResponse, listener);
+ } catch (Exception e) {
+ listener.onFailure(e);
+ }
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
+ // We haven't yet created the index for the task results so it can't be found.
+ listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", e, request.getTaskId()));
+ } else {
+ listener.onFailure(e);
+ }
+ }
+ });
+ }
+
+ /**
+ * Called with the {@linkplain GetResponse} from loading the task from the results index. Called on the node that once had the task if
+ * that node is part of the cluster or on the coordinating node if the node wasn't part of the cluster.
+ */
+ void onGetFinishedTaskFromIndex(GetResponse response, ActionListener<GetTaskResponse> listener) throws IOException {
+ if (false == response.isExists()) {
+ listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", response.getId()));
+ return;
+ }
+ if (response.isSourceEmpty()) {
+ listener.onFailure(new ElasticsearchException("Stored task status for [{}] didn't contain any source!", response.getId()));
+ return;
+ }
+ try (XContentParser parser = XContentHelper.createParser(response.getSourceAsBytesRef())) {
+ PersistedTaskInfo result = PersistedTaskInfo.PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
+ listener.onResponse(new GetTaskResponse(result));
+ }
+ }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
index 446ae3affb7..6ab0bafb2fb 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java
@@ -23,21 +23,21 @@ import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.support.tasks.BaseTasksResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.tasks.TaskId;
+import org.elasticsearch.tasks.TaskInfo;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
-import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.Set;
import java.util.stream.Collectors;
/**
@@ -47,10 +47,12 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
private List<TaskInfo> tasks;
- private Map<DiscoveryNode, List<TaskInfo>> nodes;
+ private Map<String, List<TaskInfo>> perNodeTasks;
private List<TaskGroup> groups;
+ private DiscoveryNodes discoveryNodes;
+
public ListTasksResponse() {
}
@@ -75,28 +77,11 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
/**
* Returns the list of tasks by node
*/
- public Map<DiscoveryNode, List<TaskInfo>> getPerNodeTasks() {
- if (nodes != null) {
- return nodes;
+ public Map<String, List<TaskInfo>> getPerNodeTasks() {
+ if (perNodeTasks == null) {
+ perNodeTasks = tasks.stream().collect(Collectors.groupingBy(t -> t.getTaskId().getNodeId()));
}
- Map<DiscoveryNode, List<TaskInfo>> nodeTasks = new HashMap<>();
-
- Set<DiscoveryNode> nodes = new HashSet<>();
- for (TaskInfo shard : tasks) {
- nodes.add(shard.getNode());
- }
-
- for (DiscoveryNode node : nodes) {
- List<TaskInfo> tasks = new ArrayList<>();
- for (TaskInfo taskInfo : this.tasks) {
- if (taskInfo.getNode().equals(node)) {
- tasks.add(taskInfo);
- }
- }
- nodeTasks.put(node, tasks);
- }
- this.nodes = nodeTasks;
- return nodeTasks;
+ return perNodeTasks;
}
public List<TaskGroup> getTaskGroups() {
@@ -138,6 +123,14 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
return tasks;
}
+ /**
+ * Set a reference to the {@linkplain DiscoveryNodes}. Used for calling {@link #toXContent(XContentBuilder, ToXContent.Params)} with
+ * {@code group_by=nodes}.
+ */
+ public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
+ this.discoveryNodes = discoveryNodes;
+ }
+
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
if (getTaskFailures() != null && getTaskFailures().size() > 0) {
@@ -161,43 +154,48 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
}
String groupBy = params.param("group_by", "nodes");
if ("nodes".equals(groupBy)) {
+ if (discoveryNodes == null) {
+ throw new IllegalStateException("discoveryNodes must be set before calling toXContent with group_by=nodes");
+ }
builder.startObject("nodes");
- for (Map.Entry<DiscoveryNode, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
- DiscoveryNode node = entry.getKey();
- builder.startObject(node.getId());
- builder.field("name", node.getName());
- builder.field("transport_address", node.getAddress().toString());
- builder.field("host", node.getHostName());
- builder.field("ip", node.getAddress());
+ for (Map.Entry<String, List<TaskInfo>> entry : getPerNodeTasks().entrySet()) {
+ DiscoveryNode node = discoveryNodes.get(entry.getKey());
+ builder.startObject(entry.getKey());
+ if (node != null) {
+ // If the node is no longer part of the cluster, oh well, we'll just skip its useful information.
+ builder.field("name", node.getName());
+ builder.field("transport_address", node.getAddress().toString());
+ builder.field("host", node.getHostName());
+ builder.field("ip", node.getAddress());
- builder.startArray("roles");
- for (DiscoveryNode.Role role : node.getRoles()) {
- builder.value(role.getRoleName());
- }
- builder.endArray();
-
- if (!node.getAttributes().isEmpty()) {
- builder.startObject("attributes");
- for (Map.Entry<String, String>