Merge branch 'feature/rank-eval' of github.com:elasticsearch/elasticsearch into feature/rank-eval

Isabel Drost-Fromm 2016-07-27 15:46:44 +02:00
commit 2bb5cb83a7
521 changed files with 17650 additions and 10152 deletions

View File

@@ -166,6 +166,7 @@ subprojects {
   "org.elasticsearch.client:rest:${version}": ':client:rest',
   "org.elasticsearch.client:sniffer:${version}": ':client:sniffer',
   "org.elasticsearch.client:test:${version}": ':client:test',
+  "org.elasticsearch.client:transport:${version}": ':client:transport',
   "org.elasticsearch.test:framework:${version}": ':test:framework',
   "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
   "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
@@ -175,6 +176,7 @@ subprojects {
   "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
   // for transport client
   "org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
+  "org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4',
   "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
   "org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
   "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',

View File

@@ -297,6 +297,10 @@ class BuildPlugin implements Plugin<Project> {
                 url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"
             }
         }
+        repos.maven {
+            name 'netty-snapshots'
+            url "http://s3.amazonaws.com/download.elasticsearch.org/nettysnapshots/20160722"
+        }
     }
     /** Returns a closure which can be used with a MavenPom for removing transitive dependencies. */

View File

@@ -23,6 +23,7 @@ import org.apache.rat.anttasks.SubstringLicenseMatcher
 import org.apache.rat.license.SimpleLicenseFamily
 import org.elasticsearch.gradle.AntTask
 import org.gradle.api.file.FileCollection
+import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.OutputFile
 import org.gradle.api.tasks.SourceSet
@@ -44,6 +45,16 @@ public class LicenseHeadersTask extends AntTask {
      */
     protected List<FileCollection> javaFiles
+    /** Allowed license families for this project. */
+    @Input
+    List<String> approvedLicenses = ['Apache', 'Generated']
+
+    /**
+     * Additional license families that may be found. The key is the license category name (5 characters)
+     * followed by the family name, and the value is the pattern to search for.
+     */
+    protected Map<String, String> additionalLicenses = new HashMap<>()
     LicenseHeadersTask() {
         description = "Checks sources for missing, incorrect, or unacceptable license headers"
         // Delay resolving the dependencies until after evaluation so we pick up generated sources
@@ -53,6 +64,22 @@ public class LicenseHeadersTask extends AntTask {
         }
     }
+    /**
+     * Add a new license type.
+     *
+     * The license may be added to the {@link #approvedLicenses} using the {@code familyName}.
+     *
+     * @param categoryName A 5-character string identifier for the license
+     * @param familyName   An expanded string name for the license
+     * @param pattern      A pattern to search for, which, if found, indicates a file contains the license
+     */
+    public void additionalLicense(String categoryName, String familyName, String pattern) {
+        if (categoryName.length() != 5) {
+            throw new IllegalArgumentException("License category name must be exactly 5 characters, got ${categoryName}");
+        }
+        additionalLicenses.put(categoryName + familyName, pattern);
+    }
     @Override
     protected void runAnt(AntBuilder ant) {
         ant.project.addTaskDefinition('ratReport', Report)
@@ -64,43 +91,54 @@ public class LicenseHeadersTask extends AntTask {
         // run rat, going to the file
         List<FileCollection> input = javaFiles
         ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) {
             for (FileCollection dirSet : input) {
                 for (File dir: dirSet.srcDirs) {
                     // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
                     if (dir.exists()) {
                         ant.fileset(dir: dir)
                     }
                 }
             }

             // BSD 4-clause stuff (is disallowed below)
             // we keep this here, in case someone adds BSD code for some reason, it should never be allowed.
             substringMatcher(licenseFamilyCategory: "BSD4 ",
                              licenseFamilyName: "Original BSD License (with advertising clause)") {
                 pattern(substring: "All advertising materials")
             }

             // Apache
             substringMatcher(licenseFamilyCategory: "AL   ",
                              licenseFamilyName: "Apache") {
                 // Apache license (ES)
                 pattern(substring: "Licensed to Elasticsearch under one or more contributor")
                 // Apache license (ASF)
                 pattern(substring: "Licensed to the Apache Software Foundation (ASF) under")
                 // this is the old-school one under some files
                 pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")")
             }

             // Generated resources
             substringMatcher(licenseFamilyCategory: "GEN  ",
                              licenseFamilyName: "Generated") {
                 // parsers generated by antlr
                 pattern(substring: "ANTLR GENERATED CODE")
             }
-            // approved categories
-            approvedLicense(familyName: "Apache")
-            approvedLicense(familyName: "Generated")
+            // license types added by the project
+            for (Map.Entry<String, String> additional : additionalLicenses.entrySet()) {
+                String category = additional.getKey().substring(0, 5)
+                String family = additional.getKey().substring(5)
+                substringMatcher(licenseFamilyCategory: category,
+                                 licenseFamilyName: family) {
+                    pattern(substring: additional.getValue())
+                }
+            }
+
+            // approved categories
+            for (String licenseFamily : approvedLicenses) {
+                approvedLicense(familyName: licenseFamily)
+            }
         }
         // check the license file for any errors, this should be fast.
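For illustration, this is how a consuming project could use the new hook; a minimal Groovy sketch, not part of this commit (the `MYLIC` values are invented, and the task name assumes the precommit task is registered as `licenseHeaders`):

```groovy
// hypothetical snippet in a project's build.gradle
licenseHeaders {
    // the category name must be exactly 5 characters, as enforced by additionalLicense()
    additionalLicense 'MYLIC', 'MyLicense', 'Licensed under the MyLicense terms'
    // the family name must also be approved, or matching files still fail the check
    approvedLicenses << 'MyLicense'
}
```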

View File

@@ -535,7 +535,6 @@
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]get[/\\]RestMultiGetAction.java" checks="LineLength" />
-<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]index[/\\]RestIndexAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestDeleteIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]script[/\\]RestPutIndexedScriptAction.java" checks="LineLength" />
 <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]search[/\\]RestClearScrollAction.java" checks="LineLength" />
@@ -1106,28 +1105,15 @@
 <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyPlugin.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IPv4RangeTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndexLookupTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]LongTermsTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinDocCountTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]RandomScoreFunctionTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]ScriptedMetricTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SearchFieldsTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SimpleSortTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]StringTermsTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptTests.java" checks="LineLength" />
 <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustachePlugin.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]RenderSearchTemplateTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryParserTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TemplateQueryTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheScriptEngineTests.java" checks="LineLength" />
-<suppress files="modules[/\\]lang-mustache[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]mustache[/\\]MustacheTests.java" checks="LineLength" />
 <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequest.java" checks="LineLength" />
 <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
 <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />

View File

@@ -13,7 +13,7 @@ jna = 4.2.2
 randomizedrunner = 2.3.2
 junit = 4.11
 httpclient = 4.5.2
-httpcore = 4.4.4
+httpcore = 4.4.5
 commonslogging = 1.1.3
 commonscodec = 1.10
 hamcrest = 1.3

View File

@@ -0,0 +1,35 @@
Steps to execute the benchmark:
1. Start Elasticsearch on the target host (ideally *not* on the same machine)
2. Create an empty index with the mapping you want to benchmark
3. Start either the RestClientBenchmark class or the TransportClientBenchmark class
4. Delete the index
5. Repeat steps 2-4 for multiple iterations. The first iterations are intended as warmup for Elasticsearch itself. Always start the same benchmark in step 3!
6. After the benchmark: shut down Elasticsearch and delete the data directory
Repeat all steps above for the other benchmark candidate.
Example benchmark:
* Download benchmark data from http://benchmarks.elastic.co/corpora/geonames/documents.json.bz2 and decompress
* Use the mapping file https://github.com/elastic/rally-tracks/blob/master/geonames/mappings.json to create the index
Example command line parameter list:
```
192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json geonames type 8647880 5000 "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }"
```
The parameters are in order:
* Benchmark target host IP (the host where Elasticsearch is running)
* full path to the file that should be bulk indexed
* name of the index
* name of the (sole) type in the index
* number of documents in the file
* bulk size
* a search request body (remember to escape double quotes). The `TransportClientBenchmark` uses `QueryBuilders.wrapperQuery()` internally, which places the wrapped query under a root `query` key automatically, so for the transport client the body must not contain that root key. The `RestClientBenchmark` sends the body verbatim, so there the root `query` key is required (as in the example above).
You should also set a few GC-related JVM options, e.g. `-Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails`, and keep an eye on GC activity. You can also add `-XX:+PrintCompilation` to see JIT activity.
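Putting it all together, a full invocation could look like the following sketch (the classpath placeholder is an assumption; supply the benchmark jar and its dependencies however your build produces them):

```
java -Xms4096M -Xmx4096M -XX:+UseConcMarkSweepGC -verbose:gc -XX:+PrintGCDetails \
  -cp <benchmark-and-dependency-jars> \
  org.elasticsearch.client.benchmark.rest.RestClientBenchmark \
  192.168.2.2 /home/your_user_name/.rally/benchmarks/data/geonames/documents.json \
  geonames type 8647880 5000 \
  "{ \"query\": { \"match_phrase\": { \"name\": \"Sankt Georgen\" } } }"
```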

View File

@@ -0,0 +1,48 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
apply plugin: 'elasticsearch.build'
group = 'org.elasticsearch.client'
// never try to invoke tests on the benchmark project - there aren't any
check.dependsOn.remove(test)
// explicitly override the test task too in case somebody invokes 'gradle test' so it won't trip
task test(type: Test, overwrite: true)
dependencies {
compile 'org.apache.commons:commons-math3:3.2'
compile("org.elasticsearch.client:rest:${version}")
// for transport client
compile("org.elasticsearch:elasticsearch:${version}")
compile("org.elasticsearch.client:transport:${version}")
compile project(path: ':modules:transport-netty3', configuration: 'runtime')
compile project(path: ':modules:transport-netty4', configuration: 'runtime')
compile project(path: ':modules:reindex', configuration: 'runtime')
compile project(path: ':modules:lang-mustache', configuration: 'runtime')
compile project(path: ':modules:percolator', configuration: 'runtime')
}
// No licenses for our benchmark deps (we don't ship benchmarks)
dependencyLicenses.enabled = false
extraArchive {
javadoc = false
}

View File

@@ -0,0 +1,88 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkBenchmarkTask;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
import org.elasticsearch.client.benchmark.ops.search.SearchBenchmarkTask;
import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
import org.elasticsearch.common.SuppressForbidden;
import java.io.Closeable;
public abstract class AbstractBenchmark<T extends Closeable> {
private static final int SEARCH_BENCHMARK_ITERATIONS = 10_000;
protected abstract T client(String benchmarkTargetHost) throws Exception;
protected abstract BulkRequestExecutor bulkRequestExecutor(T client, String indexName, String typeName);
protected abstract SearchRequestExecutor searchRequestExecutor(T client, String indexName);
@SuppressForbidden(reason = "system out is ok for a command line tool")
public final void run(String[] args) throws Exception {
if (args.length < 6) {
System.err.println(
"usage: benchmarkTargetHostIp indexFilePath indexName typeName numberOfDocuments bulkSize [search request body]");
System.exit(1);
}
String benchmarkTargetHost = args[0];
String indexFilePath = args[1];
String indexName = args[2];
String typeName = args[3];
int totalDocs = Integer.valueOf(args[4]);
int bulkSize = Integer.valueOf(args[5]);
int totalIterationCount = (int) Math.floor(totalDocs / bulkSize);
// consider 40% of all iterations as warmup iterations
int warmupIterations = (int) (0.4d * totalIterationCount);
int iterations = totalIterationCount - warmupIterations;
String searchBody = (args.length == 7) ? args[6] : null;
T client = client(benchmarkTargetHost);
BenchmarkRunner benchmark = new BenchmarkRunner(warmupIterations, iterations,
new BulkBenchmarkTask(
bulkRequestExecutor(client, indexName, typeName), indexFilePath, warmupIterations + iterations, bulkSize));
try {
benchmark.run();
if (searchBody != null) {
for (int run = 1; run <= 5; run++) {
System.out.println("=============");
System.out.println(" Trial run " + run);
System.out.println("=============");
for (int throughput = 100; throughput <= 100_000; throughput *= 10) {
//request a GC between trials to reduce the likelihood of a GC occurring in the middle of a trial.
System.gc();
BenchmarkRunner searchBenchmark = new BenchmarkRunner(SEARCH_BENCHMARK_ITERATIONS, SEARCH_BENCHMARK_ITERATIONS,
new SearchBenchmarkTask(
searchRequestExecutor(client, indexName), searchBody, 2 * SEARCH_BENCHMARK_ITERATIONS, throughput));
System.out.printf("Target throughput = %d ops / s%n", throughput);
searchBenchmark.run();
}
}
}
} finally {
client.close();
}
}
}
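To make the 40% warmup split in `run()` concrete, here is the same arithmetic applied to the README's example values; a standalone sketch, not part of the benchmark:

```java
public class IterationSplitExample {
    public static void main(String[] args) {
        int totalDocs = 8_647_880;                                 // documents in the bulk file (README example)
        int bulkSize = 5_000;                                      // documents per bulk request
        int totalIterationCount = totalDocs / bulkSize;            // 1729 (integer division, as in run())
        int warmupIterations = (int) (0.4d * totalIterationCount); // 691
        int iterations = totalIterationCount - warmupIterations;   // 1038 measured iterations
        System.out.printf("warmup=%d, measured=%d%n", warmupIterations, iterations);
    }
}
```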

View File

@@ -0,0 +1,84 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark;
import org.elasticsearch.client.benchmark.metrics.Metrics;
import org.elasticsearch.client.benchmark.metrics.MetricsCalculator;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
public final class BenchmarkRunner {
private final int warmupIterations;
private final int iterations;
private final BenchmarkTask task;
public BenchmarkRunner(int warmupIterations, int iterations, BenchmarkTask task) {
this.warmupIterations = warmupIterations;
this.iterations = iterations;
this.task = task;
}
@SuppressForbidden(reason = "system out is ok for a command line tool")
public void run() throws Exception {
SampleRecorder recorder = new SampleRecorder(warmupIterations, iterations);
System.out.printf("Running %s with %d warmup iterations and %d iterations.%n",
task.getClass().getSimpleName(), warmupIterations, iterations);
try {
task.setUp(recorder);
task.run();
task.tearDown();
} catch (InterruptedException ex) {
Thread.currentThread().interrupt();
return;
}
List<Sample> samples = recorder.getSamples();
final List<Metrics> summaryMetrics = MetricsCalculator.calculate(samples);
if (summaryMetrics.isEmpty()) {
System.out.println("No results.");
}
for (Metrics metrics : summaryMetrics) {
System.out.printf(Locale.ROOT, "Operation: %s%n", metrics.operation);
String stats = String.format(Locale.ROOT,
"Throughput = %f ops/s, p90 = %f ms, p95 = %f ms, p99 = %f ms, p99.9 = %f ms, p99.99 = %f ms",
metrics.throughput,
metrics.serviceTimeP90, metrics.serviceTimeP95,
metrics.serviceTimeP99, metrics.serviceTimeP999,
metrics.serviceTimeP9999);
System.out.println(repeat(stats.length(), '-'));
System.out.println(stats);
System.out.printf("success count = %d, error count = %d%n", metrics.successCount, metrics.errorCount);
System.out.println(repeat(stats.length(), '-'));
}
}
private String repeat(int times, char character) {
char[] characters = new char[times];
Arrays.fill(characters, character);
return new String(characters);
}
}

View File

@@ -0,0 +1,29 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
public interface BenchmarkTask {
void setUp(SampleRecorder sampleRecorder) throws Exception;
void run() throws Exception;
void tearDown() throws Exception;
}

View File

@@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.metrics;
public final class Metrics {
public final String operation;
public final long successCount;
public final long errorCount;
public final double throughput;
public final double serviceTimeP90;
public final double serviceTimeP95;
public final double serviceTimeP99;
public final double serviceTimeP999;
public final double serviceTimeP9999;
public Metrics(String operation, long successCount, long errorCount, double throughput,
double serviceTimeP90, double serviceTimeP95, double serviceTimeP99,
double serviceTimeP999, double serviceTimeP9999) {
this.operation = operation;
this.successCount = successCount;
this.errorCount = errorCount;
this.throughput = throughput;
this.serviceTimeP90 = serviceTimeP90;
this.serviceTimeP95 = serviceTimeP95;
this.serviceTimeP99 = serviceTimeP99;
this.serviceTimeP999 = serviceTimeP999;
this.serviceTimeP9999 = serviceTimeP9999;
}
}

View File

@@ -0,0 +1,80 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.metrics;
import org.apache.commons.math3.stat.StatUtils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
public final class MetricsCalculator {
public static List<Metrics> calculate(Collection<Sample> samples) {
Map<String, List<Sample>> samplesPerOperation = groupByOperation(samples);
return calculateMetricsPerOperation(samplesPerOperation);
}
private static Map<String, List<Sample>> groupByOperation(Collection<Sample> samples) {
Map<String, List<Sample>> samplesPerOperation = new HashMap<>();
for (Sample sample : samples) {
if (!samplesPerOperation.containsKey(sample.getOperation())) {
samplesPerOperation.put(sample.getOperation(), new ArrayList<>());
}
samplesPerOperation.get(sample.getOperation()).add(sample);
}
return samplesPerOperation;
}
private static List<Metrics> calculateMetricsPerOperation(Map<String, List<Sample>> samplesPerOperation) {
List<Metrics> metrics = new ArrayList<>();
for (Map.Entry<String, List<Sample>> operationAndMetrics : samplesPerOperation.entrySet()) {
List<Sample> samples = operationAndMetrics.getValue();
double[] serviceTimes = new double[samples.size()];
int it = 0;
long firstStart = Long.MAX_VALUE;
long latestEnd = Long.MIN_VALUE;
for (Sample sample : samples) {
firstStart = Math.min(sample.getStartTimestamp(), firstStart);
latestEnd = Math.max(sample.getStopTimestamp(), latestEnd);
serviceTimes[it++] = sample.getServiceTime();
}
metrics.add(new Metrics(operationAndMetrics.getKey(),
samples.stream().filter((r) -> r.isSuccess()).count(),
samples.stream().filter((r) -> !r.isSuccess()).count(),
// throughput calculation is based on the total (Wall clock) time it took to generate all samples
calculateThroughput(samples.size(), latestEnd - firstStart),
// convert ns -> ms without losing precision
StatUtils.percentile(serviceTimes, 90.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 95.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 99.0d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 99.9d) / TimeUnit.MILLISECONDS.toNanos(1L),
StatUtils.percentile(serviceTimes, 99.99d) / TimeUnit.MILLISECONDS.toNanos(1L)));
}
return metrics;
}
    private static double calculateThroughput(int sampleSize, double duration) {
        // duration is wall-clock time in nanoseconds, so the result is in operations per second
        return sampleSize * (TimeUnit.SECONDS.toNanos(1L) / duration);
}
}
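To make the units explicit, here is the same throughput arithmetic with made-up numbers; a standalone sketch, not part of the benchmark:

```java
public class ThroughputExample {
    public static void main(String[] args) {
        int sampleSize = 10_000;            // samples taken for one operation type
        double duration = 20_000_000_000.0; // 20 seconds of wall-clock time, in nanoseconds
        // same formula as MetricsCalculator#calculateThroughput
        double throughput = sampleSize * (1_000_000_000L / duration);
        System.out.println(throughput + " ops/s"); // prints 500.0 ops/s
    }
}
```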

View File

@@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.metrics;
public final class Sample {
private final String operation;
private final long startTimestamp;
private final long stopTimestamp;
private final boolean success;
public Sample(String operation, long startTimestamp, long stopTimestamp, boolean success) {
this.operation = operation;
this.startTimestamp = startTimestamp;
this.stopTimestamp = stopTimestamp;
this.success = success;
}
public String getOperation() {
return operation;
}
public boolean isSuccess() {
return success;
}
public long getStartTimestamp() {
return startTimestamp;
}
public long getStopTimestamp() {
return stopTimestamp;
}
public long getServiceTime() {
// this is *not* latency, we're not including wait time in the queue (on purpose)
return stopTimestamp - startTimestamp;
}
}

View File

@@ -0,0 +1,51 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.metrics;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
* Stores measurement samples.
*
* This class is NOT threadsafe.
*/
public final class SampleRecorder {
private final int warmupIterations;
private final List<Sample> samples;
private int currentIteration;
public SampleRecorder(int warmupIterations, int iterations) {
this.warmupIterations = warmupIterations;
this.samples = new ArrayList<>(iterations);
}
public void addSample(Sample sample) {
currentIteration++;
// only add samples after warmup
if (currentIteration > warmupIterations) {
samples.add(sample);
}
}
public List<Sample> getSamples() {
return Collections.unmodifiableList(samples);
}
}
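A minimal sketch of the recorder contract with hypothetical values: samples recorded during the warmup window are dropped, everything afterwards is kept.

```java
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;

public class SampleRecorderExample {
    public static void main(String[] args) {
        SampleRecorder recorder = new SampleRecorder(2, 3); // 2 warmup iterations, 3 measured
        for (int i = 0; i < 5; i++) {
            recorder.addSample(new Sample("op", 0L, 1L, true));
        }
        // the first 2 samples fell into the warmup window and were discarded
        System.out.println(recorder.getSamples().size()); // prints 3
    }
}
```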

View File

@@ -0,0 +1,174 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.ops.bulk;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import java.io.BufferedReader;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
public class BulkBenchmarkTask implements BenchmarkTask {
private final BulkRequestExecutor requestExecutor;
private final String indexFilePath;
private final int totalIterations;
private final int bulkSize;
private LoadGenerator generator;
private ExecutorService executorService;
public BulkBenchmarkTask(BulkRequestExecutor requestExecutor, String indexFilePath, int totalIterations, int bulkSize) {
this.requestExecutor = requestExecutor;
this.indexFilePath = indexFilePath;
this.totalIterations = totalIterations;
this.bulkSize = bulkSize;
}
@Override
@SuppressForbidden(reason = "PathUtils#get is fine - we don't have environment here")
public void setUp(SampleRecorder sampleRecorder) {
BlockingQueue<List<String>> bulkQueue = new ArrayBlockingQueue<>(256);
BulkIndexer runner = new BulkIndexer(bulkQueue, totalIterations, sampleRecorder, requestExecutor);
executorService = Executors.newSingleThreadExecutor((r) -> new Thread(r, "bulk-index-runner"));
executorService.submit(runner);
generator = new LoadGenerator(PathUtils.get(indexFilePath), bulkQueue, bulkSize);
}
@Override
@SuppressForbidden(reason = "system out is ok for a command line tool")
public void run() throws Exception {
generator.execute();
// when the generator is done, there is no more data -> shut down the client
executorService.shutdown();
// we need to wait until the queue is drained
final boolean finishedNormally = executorService.awaitTermination(20, TimeUnit.MINUTES);
if (finishedNormally == false) {
System.err.println("Background tasks are still running after timeout on enclosing pool. Forcing pool shutdown.");
executorService.shutdownNow();
}
}
@Override
public void tearDown() {
//no op
}
private static final class LoadGenerator {
private final Path bulkDataFile;
private final BlockingQueue<List<String>> bulkQueue;
private final int bulkSize;
public LoadGenerator(Path bulkDataFile, BlockingQueue<List<String>> bulkQueue, int bulkSize) {
this.bulkDataFile = bulkDataFile;
this.bulkQueue = bulkQueue;
this.bulkSize = bulkSize;
}
@SuppressForbidden(reason = "Classic I/O is fine in non-production code")
public void execute() {
try (BufferedReader reader = Files.newBufferedReader(bulkDataFile, StandardCharsets.UTF_8)) {
String line;
int bulkIndex = 0;
List<String> bulkData = new ArrayList<>(bulkSize);
while ((line = reader.readLine()) != null) {
if (bulkIndex == bulkSize) {
sendBulk(bulkData);
// reset data structures
bulkData = new ArrayList<>(bulkSize);
bulkIndex = 0;
}
bulkData.add(line);
bulkIndex++;
}
// also send the last bulk:
if (bulkIndex > 0) {
sendBulk(bulkData);
}
} catch (IOException e) {
throw new ElasticsearchException(e);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
}
private void sendBulk(List<String> bulkData) throws InterruptedException {
bulkQueue.put(bulkData);
}
}
private static final class BulkIndexer implements Runnable {
private static final ESLogger logger = ESLoggerFactory.getLogger(BulkIndexer.class.getName());
private final BlockingQueue<List<String>> bulkData;
private final int totalIterations;
private final BulkRequestExecutor bulkRequestExecutor;
private final SampleRecorder sampleRecorder;
public BulkIndexer(BlockingQueue<List<String>> bulkData, int totalIterations, SampleRecorder sampleRecorder,
BulkRequestExecutor bulkRequestExecutor) {
this.bulkData = bulkData;
this.totalIterations = totalIterations;
this.bulkRequestExecutor = bulkRequestExecutor;
this.sampleRecorder = sampleRecorder;
}
@Override
public void run() {
for (int iteration = 0; iteration < totalIterations; iteration++) {
boolean success = false;
List<String> currentBulk;
try {
currentBulk = bulkData.take();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return;
}
// Yes, this approach is prone to coordinated omission *but* we have to consider that we want to benchmark a closed system
// with backpressure here instead of an open system. So this is actually correct in this case.
long start = System.nanoTime();
try {
success = bulkRequestExecutor.bulkIndex(currentBulk);
} catch (Exception ex) {
logger.warn("Error while executing bulk request", ex);
}
long stop = System.nanoTime();
sampleRecorder.addSample(new Sample("bulk", start, stop, success));
}
}
}
}

View File

@@ -0,0 +1,25 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.ops.bulk;
import java.util.List;
public interface BulkRequestExecutor {
boolean bulkIndex(List<String> bulkData);
}

View File

@@ -0,0 +1,86 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.ops.search;
import org.elasticsearch.client.benchmark.BenchmarkTask;
import org.elasticsearch.client.benchmark.metrics.Sample;
import org.elasticsearch.client.benchmark.metrics.SampleRecorder;
import java.util.concurrent.TimeUnit;
public class SearchBenchmarkTask implements BenchmarkTask {
private static final long MICROS_PER_SEC = TimeUnit.SECONDS.toMicros(1L);
private static final long NANOS_PER_MICRO = TimeUnit.MICROSECONDS.toNanos(1L);
private final SearchRequestExecutor searchRequestExecutor;
private final String searchRequestBody;
private final int iterations;
private final int targetThroughput;
private SampleRecorder sampleRecorder;
public SearchBenchmarkTask(SearchRequestExecutor searchRequestExecutor, String body, int iterations, int targetThroughput) {
this.searchRequestExecutor = searchRequestExecutor;
this.searchRequestBody = body;
this.iterations = iterations;
this.targetThroughput = targetThroughput;
}
@Override
public void setUp(SampleRecorder sampleRecorder) throws Exception {
this.sampleRecorder = sampleRecorder;
}
@Override
public void run() throws Exception {
for (int iteration = 0; iteration < this.iterations; iteration++) {
final long start = System.nanoTime();
boolean success = searchRequestExecutor.search(searchRequestBody);
final long stop = System.nanoTime();
sampleRecorder.addSample(new Sample("search", start, stop, success));
int waitTime = (int) Math.floor(MICROS_PER_SEC / targetThroughput - (stop - start) / NANOS_PER_MICRO);
if (waitTime > 0) {
// Thread.sleep() time is not very accurate (it's most of the time around 1 - 2 ms off)
// so we rather busy spin for the last few microseconds. Still not entirely accurate but way closer
waitMicros(waitTime);
}
}
}
private void waitMicros(int waitTime) throws InterruptedException {
int millis = waitTime / 1000;
int micros = waitTime % 1000;
if (millis > 0) {
Thread.sleep(millis);
}
// busy spin for the rest of the time
if (micros > 0) {
long end = System.nanoTime() + 1000L * micros;
while (end > System.nanoTime()) {
// busy spin
}
}
}
@Override
public void tearDown() throws Exception {
// no op
}
}
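The pacing arithmetic in `run()` may be easier to see with concrete numbers; a standalone sketch with a made-up service time:

```java
public class PacingExample {
    public static void main(String[] args) {
        long microsPerSec = 1_000_000L;
        long nanosPerMicro = 1_000L;
        int targetThroughput = 100;         // ops/s, i.e. a budget of 10_000 us per operation
        long serviceTimeNanos = 3_000_000L; // the search itself took 3 ms
        // same calculation as in SearchBenchmarkTask#run
        int waitTime = (int) (microsPerSec / targetThroughput - serviceTimeNanos / nanosPerMicro);
        System.out.println(waitTime + " us to wait"); // 7000 us: waitMicros() sleeps 7 ms
    }
}
```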

View File

@@ -0,0 +1,23 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.ops.search;
public interface SearchRequestExecutor {
boolean search(String source);
}

View File

@@ -0,0 +1,108 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.rest;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpStatus;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.benchmark.AbstractBenchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
public static void main(String[] args) throws Exception {
RestClientBenchmark b = new RestClientBenchmark();
b.run(args);
}
@Override
protected RestClient client(String benchmarkTargetHost) {
return RestClient.builder(new HttpHost(benchmarkTargetHost, 9200)).build();
}
@Override
protected BulkRequestExecutor bulkRequestExecutor(RestClient client, String indexName, String typeName) {
return new RestBulkRequestExecutor(client, indexName, typeName);
}
@Override
protected SearchRequestExecutor searchRequestExecutor(RestClient client, String indexName) {
return new RestSearchRequestExecutor(client, indexName);
}
    private static final class RestBulkRequestExecutor implements BulkRequestExecutor {
        private final RestClient client;
        private final String actionMetaData;
        private final String endpoint;

        public RestBulkRequestExecutor(RestClient client, String index, String type) {
            this.client = client;
            this.actionMetaData = String.format(Locale.ROOT, "{ \"index\" : { \"_index\" : \"%s\", \"_type\" : \"%s\" } }%n", index, type);
            // derive the bulk endpoint from the configured index and type instead of hardcoding "/geonames/type/_bulk"
            this.endpoint = "/" + index + "/" + type + "/_bulk";
        }
@Override
public boolean bulkIndex(List<String> bulkData) {
StringBuilder bulkRequestBody = new StringBuilder();
for (String bulkItem : bulkData) {
bulkRequestBody.append(actionMetaData);
bulkRequestBody.append(bulkItem);
bulkRequestBody.append("\n");
}
HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
try {
Response response = client.performRequest("POST", "/geonames/type/_bulk", Collections.emptyMap(), entity);
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
} catch (Exception e) {
throw new ElasticsearchException(e);
}
}
}
private static final class RestSearchRequestExecutor implements SearchRequestExecutor {
private final RestClient client;
private final String endpoint;
private RestSearchRequestExecutor(RestClient client, String indexName) {
this.client = client;
this.endpoint = "/" + indexName + "/_search";
}
@Override
public boolean search(String source) {
HttpEntity searchBody = new NStringEntity(source, StandardCharsets.UTF_8);
try {
Response response = client.performRequest("GET", endpoint, Collections.emptyMap(), searchBody);
return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
} catch (IOException e) {
throw new ElasticsearchException(e);
}
}
}
}
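For reference, the body assembled in `bulkIndex()` above is newline-delimited JSON: one action-metadata line followed by one document line per document. With a hypothetical one-field document it would look like this:

```
{ "index" : { "_index" : "geonames", "_type" : "type" } }
{ "name" : "Sankt Georgen" }
```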

View File

@@ -0,0 +1,117 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.benchmark.transport;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.benchmark.AbstractBenchmark;
import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.client.PreBuiltTransportClient;
import java.net.InetAddress;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.ExecutionException;
public final class TransportClientBenchmark extends AbstractBenchmark<TransportClient> {
public static void main(String[] args) throws Exception {
TransportClientBenchmark benchmark = new TransportClientBenchmark();
benchmark.run(args);
}
@Override
protected TransportClient client(String benchmarkTargetHost) throws Exception {
TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(benchmarkTargetHost), 9300));
return client;
}
@Override
protected BulkRequestExecutor bulkRequestExecutor(TransportClient client, String indexName, String typeName) {
return new TransportBulkRequestExecutor(client, indexName, typeName);
}
@Override
protected SearchRequestExecutor searchRequestExecutor(TransportClient client, String indexName) {
return new TransportSearchRequestExecutor(client, indexName);
}
private static final class TransportBulkRequestExecutor implements BulkRequestExecutor {
private final TransportClient client;
private final String indexName;
private final String typeName;
public TransportBulkRequestExecutor(TransportClient client, String indexName, String typeName) {
this.client = client;
this.indexName = indexName;
this.typeName = typeName;
}
@Override
public boolean bulkIndex(List<String> bulkData) {
BulkRequestBuilder builder = client.prepareBulk();
for (String bulkItem : bulkData) {
builder.add(new IndexRequest(indexName, typeName).source(bulkItem.getBytes(StandardCharsets.UTF_8)));
}
BulkResponse bulkResponse;
try {
bulkResponse = builder.execute().get();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
} catch (ExecutionException e) {
throw new ElasticsearchException(e);
}
return !bulkResponse.hasFailures();
}
}
private static final class TransportSearchRequestExecutor implements SearchRequestExecutor {
private final TransportClient client;
private final String indexName;
private TransportSearchRequestExecutor(TransportClient client, String indexName) {
this.client = client;
this.indexName = indexName;
}
@Override
public boolean search(String source) {
final SearchResponse response;
try {
response = client.prepareSearch(indexName).setQuery(QueryBuilders.wrapperQuery(source)).execute().get();
return response.status() == RestStatus.OK;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
return false;
} catch (ExecutionException e) {
throw new ElasticsearchException(e);
}
}
}
}

View File

@@ -0,0 +1,9 @@
es.logger.level=INFO
log4j.rootLogger=${es.logger.level}, out
log4j.logger.org.apache.http=INFO, out
log4j.additivity.org.apache.http=false
log4j.appender.out=org.apache.log4j.ConsoleAppender
log4j.appender.out.layout=org.apache.log4j.PatternLayout
log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n

View File

@@ -31,6 +31,8 @@ group = 'org.elasticsearch.client'
 dependencies {
     compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
     compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
+    compile "org.apache.httpcomponents:httpasyncclient:4.1.2"
+    compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}"
     compile "commons-codec:commons-codec:${versions.commonscodec}"
     compile "commons-logging:commons-logging:${versions.commonslogging}"
@@ -56,6 +58,11 @@ forbiddenApisTest {
     signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
 }
+dependencyLicenses {
+    mapping from: /http.*/, to: 'httpclient'
+    mapping from: /commons-.*/, to: 'commons'
+}
 //JarHell is part of es core, which we don't want to pull in
 jarHell.enabled=false

View File

@ -1,6 +0,0 @@
Apache Commons Logging
Copyright 2003-2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -0,0 +1 @@
95aa3e6fb520191a0970a73cf09f62948ee614be

View File

@ -1 +0,0 @@
b31526a230871fbe285fbcbe2813f9c0839ae9b0

View File

@ -0,0 +1 @@
e7501a1b34325abb00d17dde96150604a0658b54

View File

@ -1,558 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
=========================================================================
This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0
Full license text: <http://mozilla.org/MPL/2.0/>
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -1,6 +0,0 @@
Apache HttpComponents Client
Copyright 1999-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -0,0 +1 @@
f4be009e7505f6ceddf21e7960c759f413f15056

View File

@ -0,0 +1,111 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpException;
import org.apache.http.HttpResponse;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.nio.entity.ContentBufferEntity;
import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer;
import org.apache.http.nio.util.ByteBufferAllocator;
import org.apache.http.nio.util.HeapByteBufferAllocator;
import org.apache.http.nio.util.SimpleInputBuffer;
import org.apache.http.protocol.HttpContext;
import java.io.IOException;
/**
* Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole
* response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
 * Limits the size of responses that can be read to {@link #DEFAULT_BUFFER_LIMIT} by default; the limit is configurable.
* Throws an exception in case the entity is longer than the configured buffer limit.
*/
public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer<HttpResponse> {
//default buffer limit is 10MB
public static final int DEFAULT_BUFFER_LIMIT = 10 * 1024 * 1024;
private final int bufferLimit;
private volatile HttpResponse response;
private volatile SimpleInputBuffer buf;
/**
* Creates a new instance of this consumer with a buffer limit of {@link #DEFAULT_BUFFER_LIMIT}
*/
public HeapBufferedAsyncResponseConsumer() {
this.bufferLimit = DEFAULT_BUFFER_LIMIT;
}
/**
* Creates a new instance of this consumer with the provided buffer limit
*/
public HeapBufferedAsyncResponseConsumer(int bufferLimit) {
if (bufferLimit <= 0) {
throw new IllegalArgumentException("bufferLimit must be greater than 0");
}
this.bufferLimit = bufferLimit;
}
@Override
protected void onResponseReceived(HttpResponse response) throws HttpException, IOException {
this.response = response;
}
@Override
protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException {
long len = entity.getContentLength();
if (len > bufferLimit) {
throw new ContentTooLongException("entity content is too long [" + len +
"] for the configured buffer limit [" + bufferLimit + "]");
}
if (len < 0) {
len = 4096;
}
this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator());
this.response.setEntity(new ContentBufferEntity(entity, this.buf));
}
/**
* Returns the instance of {@link ByteBufferAllocator} to use for content buffering.
* Allows to plug in any {@link ByteBufferAllocator} implementation.
*/
protected ByteBufferAllocator getByteBufferAllocator() {
return HeapByteBufferAllocator.INSTANCE;
}
@Override
protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException {
this.buf.consumeContent(decoder);
}
@Override
protected HttpResponse buildResult(HttpContext context) throws Exception {
return response;
}
@Override
protected void releaseResources() {
response = null;
}
}
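A short usage sketch for this consumer, assuming a restClient instance as created later in this change; the 1 MB limit is an arbitrary example value:

    //responses whose content-length exceeds the limit fail with ContentTooLongException
    HttpAsyncResponseConsumer<HttpResponse> consumer = new HeapBufferedAsyncResponseConsumer(1024 * 1024);
    Response response = restClient.performRequest("GET", "/_search", Collections.<String, String>emptyMap(), null, consumer);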

View File

@ -81,7 +81,7 @@ final class RequestLogger {
/**
 * Logs a request that failed
 */
-static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {
+static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
    if (logger.isDebugEnabled()) {
        logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
    }

View File

@ -22,26 +22,23 @@
package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
+import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import java.io.Closeable;
-import java.io.IOException;
import java.util.Objects;
/**
- * Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with
+ * Holds an elasticsearch response. It wraps the {@link HttpResponse} returned and associates it with
 * its corresponding {@link RequestLine} and {@link HttpHost}.
- * It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool.
 */
-public class Response implements Closeable {
+public final class Response {
    private final RequestLine requestLine;
    private final HttpHost host;
-    private final CloseableHttpResponse response;
-    Response(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) {
+    private final HttpResponse response;
+    Response(RequestLine requestLine, HttpHost host, HttpResponse response) {
        Objects.requireNonNull(requestLine, "requestLine cannot be null");
        Objects.requireNonNull(host, "node cannot be null");
        Objects.requireNonNull(response, "response cannot be null");
@ -107,9 +104,4 @@ public class Response implements Closeable {
        ", response=" + response.getStatusLine() +
        '}';
    }
-    @Override
-    public void close() throws IOException {
-        this.response.close();
-    }
}

View File

@ -23,44 +23,26 @@ import java.io.IOException;
/**
 * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error.
- * Note that the response body gets passed in as a string and read eagerly, which means that the Response object
- * is expected to be closed and available only to read metadata like status line, request line, response headers.
+ * Holds the response that was returned.
 */
-public class ResponseException extends IOException {
+public final class ResponseException extends IOException {
    private Response response;
-    private final String responseBody;
-    ResponseException(Response response, String responseBody) throws IOException {
-        super(buildMessage(response,responseBody));
+    ResponseException(Response response) throws IOException {
+        super(buildMessage(response));
        this.response = response;
-        this.responseBody = responseBody;
    }
-    private static String buildMessage(Response response, String responseBody) {
-        String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
+    private static String buildMessage(Response response) {
+        return response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
                + ": " + response.getStatusLine().toString();
-        if (responseBody != null) {
-            message += "\n" + responseBody;
-        }
-        return message;
    }
    /**
     * Returns the {@link Response} that caused this exception to be thrown.
-     * Expected to be used only to read metadata like status line, request line, response headers. The response body should
-     * be retrieved using {@link #getResponseBody()}
     */
    public Response getResponse() {
        return response;
    }
-    /**
-     * Returns the response body as a string or null if there wasn't any.
-     * The body is eagerly consumed when an ResponseException gets created, and its corresponding Response
-     * gets closed straightaway so this method is the only way to get back the response body that was returned.
-     */
-    public String getResponseBody() {
-        return responseBody;
-    }
}
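With the eager body reading removed, a sketch of how a caller might now handle an error response; the endpoint is a placeholder, and the body, when needed, is read from the entity of the wrapped response:

    try {
        restClient.performRequest("GET", "/index/_search");
    } catch (ResponseException e) {
        //metadata stays available on the exception; the body can be consumed from e.getResponse().getEntity()
        int status = e.getResponse().getStatusLine().getStatusCode();
        RequestLine requestLine = e.getResponse().getRequestLine();
    }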

View File

@ -19,24 +19,22 @@
package org.elasticsearch.client;
-import org.apache.http.StatusLine;
-import org.apache.http.client.methods.CloseableHttpResponse;
-import org.apache.http.message.BasicHttpResponse;
-import java.io.IOException;
/**
- * Simple {@link CloseableHttpResponse} impl needed to easily create http responses that are closeable given that
- * org.apache.http.impl.execchain.HttpResponseProxy is not public.
+ * Listener to be provided when calling async performRequest methods provided by {@link RestClient}.
+ * Those methods that do accept a listener will return immediately, execute asynchronously, and notify
+ * the listener whenever the request yielded a response, or failed with an exception.
 */
-class CloseableBasicHttpResponse extends BasicHttpResponse implements CloseableHttpResponse {
-    public CloseableBasicHttpResponse(StatusLine statusline) {
-        super(statusline);
-    }
-    @Override
-    public void close() throws IOException {
-        //nothing to close
-    }
+public interface ResponseListener {
+    /**
+     * Method invoked if the request yielded a successful response
+     */
+    void onSuccess(Response response);
+    /**
+     * Method invoked if the request failed. There are two main categories of failures: connection failures (usually
+     * {@link java.io.IOException}s), or responses that were treated as errors based on their error response code
+     * ({@link ResponseException}s).
+     */
+    void onFailure(Exception exception);
}
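An implementation sketch for the new listener, passed to one of the async performRequest overloads introduced below; the endpoint is a placeholder:

    restClient.performRequest("GET", "/_cluster/health", new ResponseListener() {
        @Override
        public void onSuccess(Response response) {
            //the request yielded a successful response
        }
        @Override
        public void onFailure(Exception exception) {
            //either a connection failure (IOException) or an error status code (ResponseException)
        }
    });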

View File

@ -20,14 +20,12 @@ package org.elasticsearch.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
+import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
@ -37,20 +35,22 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.ContentType;
-import org.apache.http.impl.client.CloseableHttpClient;
-import org.apache.http.impl.client.HttpClientBuilder;
-import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
-import org.apache.http.util.EntityUtils;
+import org.apache.http.concurrent.FutureCallback;
+import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
+import org.apache.http.nio.client.methods.HttpAsyncMethods;
+import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
+import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
+import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -58,30 +58,31 @@ import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
/**
 * Client that connects to an elasticsearch cluster through http.
- * Must be created using {@link Builder}, which allows to set all the different options or just rely on defaults.
+ * Must be created using {@link RestClientBuilder}, which allows to set all the different options or just rely on defaults.
 * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
 * by calling {@link #setHosts(HttpHost...)}.
 * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When
 * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
 * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
 * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
- * deserve a retry) are retried till one responds or none of them does, in which case an {@link IOException} will be thrown.
+ * deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
 *
 * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
 */
public final class RestClient implements Closeable {
    private static final Log logger = LogFactory.getLog(RestClient.class);
-    public static ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8);
-    private final CloseableHttpClient client;
-    //we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy
-    //when we create the HttpClient instance on our own as there would be two different ways to set the default headers.
+    private final CloseableHttpAsyncClient client;
+    //we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
    private final Header[] defaultHeaders;
    private final long maxRetryTimeoutMillis;
    private final AtomicInteger lastHostIndex = new AtomicInteger(0);
@ -89,7 +90,7 @@ public final class RestClient implements Closeable {
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
-RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
+RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
           HttpHost[] hosts, FailureListener failureListener) {
    this.client = client;
    this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
@ -98,6 +99,13 @@ public final class RestClient implements Closeable {
    setHosts(hosts);
}
+/**
+ * Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
+ */
+public static RestClientBuilder builder(HttpHost... hosts) {
+    return new RestClientBuilder(hosts);
+}
/**
 * Replaces the hosts that the client communicates with.
 * @see HttpHost
@ -116,8 +124,9 @@ public final class RestClient implements Closeable {
}
/**
- * Sends a request to the elasticsearch cluster that the current client points to.
- * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters and request body.
+ * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
+ * and request body.
 *
 * @param method the http method
 * @param endpoint the path of the request (without host and port)
@ -128,12 +137,12 @@ public final class RestClient implements Closeable {
 * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
 */
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
-    return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
+    return performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the current client points to.
- * Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
+ * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
 *
 * @param method the http method
 * @param endpoint the path of the request (without host and port)
@ -145,15 +154,14 @@ public final class RestClient implements Closeable {
 * @throws ResponseException in case elasticsearch responded with a status code that indicated an error
 */
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
-    return performRequest(method, endpoint, params, null, headers);
+    return performRequest(method, endpoint, params, (HttpEntity)null, headers);
}
/**
- * Sends a request to the elasticsearch cluster that the current client points to.
- * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
- * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
- * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
- * till one responds or none of them does, in which case an {@link IOException} will be thrown.
+ * Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
+ * to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)}
+ * which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
+ * will be used to consume the response body.
 *
 * @param method the http method
 * @param endpoint the path of the request (without host and port)
@ -167,72 +175,183 @@ public final class RestClient implements Closeable {
 */
public Response performRequest(String method, String endpoint, Map<String, String> params,
                               HttpEntity entity, Header... headers) throws IOException {
HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
return performRequest(method, endpoint, params, entity, responseConsumer, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Blocks until the request is completed and returns
* its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
* are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
* they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
* nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
* body gets streamed from a non-blocking HTTP connection on the client side.
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
Header... headers) throws IOException {
SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
performRequest(method, endpoint, params, entity, responseConsumer, listener, headers);
return listener.get();
}
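Putting the blocking pieces together, a minimal end-to-end sketch; the host and port are placeholders, and the assumption that RestClientBuilder exposes a build() method is illustrative rather than confirmed by this diff:

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http")).build();
    Response response = restClient.performRequest("GET", "/");
    restClient.close();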
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
* {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, ResponseListener responseListener, Header... headers) {
performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
* {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
ResponseListener responseListener, Header... headers) {
performRequest(method, endpoint, params, null, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
* which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance, {@link HeapBufferedAsyncResponseConsumer}
* will be used to consume the response body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, ResponseListener responseListener, Header... headers) {
HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
performRequest(method, endpoint, params, entity, responseConsumer, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. The request is executed asynchronously
* and the provided {@link ResponseListener} gets notified upon request completion or failure.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
* amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
* the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
* until one responds or none of them does, in which case an {@link IOException} will be thrown.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
* body gets streamed from a non-blocking HTTP connection on the client side.
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
ResponseListener responseListener, Header... headers) {
        URI uri = buildUri(endpoint, params);
        HttpRequestBase request = createHttpRequest(method, uri, entity);
        setHeaders(request, headers);
        FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
        long startTime = System.nanoTime();
        performRequest(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
    }

    private void performRequest(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
                                final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
                                final FailureTrackingResponseListener listener) {
        final HttpHost host = hosts.next();
        //we stream the request body if the entity allows for it
        HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
        client.execute(requestProducer, responseConsumer, new FutureCallback<HttpResponse>() {
            @Override
            public void completed(HttpResponse httpResponse) {
                try {
                    RequestLogger.logResponse(logger, request, host, httpResponse);
                    int statusCode = httpResponse.getStatusLine().getStatusCode();
                    Response response = new Response(request.getRequestLine(), host, httpResponse);
                    if (isSuccessfulResponse(request.getMethod(), statusCode)) {
                        onResponse(host);
                        listener.onSuccess(response);
                    } else {
                        ResponseException responseException = new ResponseException(response);
                        if (isRetryStatus(statusCode)) {
                            //mark host dead and retry against next one
                            onFailure(host);
                            retryIfPossible(responseException, hosts, request);
                        } else {
                            //mark host alive and don't retry, as the error should be a request problem
                            onResponse(host);
                            listener.onDefinitiveFailure(responseException);
                        }
                    }
                } catch(Exception e) {
                    listener.onDefinitiveFailure(e);
                }
            }

            @Override
            public void failed(Exception failure) {
                try {
                    RequestLogger.logFailedRequest(logger, request, host, failure);
                    onFailure(host);
                    retryIfPossible(failure, hosts, request);
                } catch(Exception e) {
                    listener.onDefinitiveFailure(e);
                }
            }

            private void retryIfPossible(Exception exception, Iterator<HttpHost> hosts, HttpRequestBase request) {
                if (hosts.hasNext()) {
                    //in case we are retrying, check whether maxRetryTimeout has been reached
                    long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
                    long timeout = maxRetryTimeoutMillis - timeElapsedMillis;
                    if (timeout <= 0) {
                        IOException retryTimeoutException = new IOException(
                                "request retries exceeded max retry timeout [" + maxRetryTimeoutMillis + "]");
                        listener.onDefinitiveFailure(retryTimeoutException);
                    } else {
                        listener.trackFailure(exception);
                        //also reset the request to make it reusable for the next attempt
                        request.reset();
                        performRequest(startTime, hosts, request, responseConsumer, listener);
                    }
                } else {
                    listener.onDefinitiveFailure(exception);
                }
            }

            @Override
            public void cancelled() {
                listener.onDefinitiveFailure(new ExecutionException("request was cancelled", null));
            }
        });
    }
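For orientation, a minimal usage sketch of the asynchronous API above; the host, endpoint and listener bodies are illustrative assumptions, not part of this commit:

import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;

public class AsyncUsageSketch {
    public static void main(String[] args) {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
        restClient.performRequest("GET", "/", Collections.<String, String>emptyMap(), null,
                new ResponseListener() {
                    @Override
                    public void onSuccess(Response response) {
                        //invoked once a host returned a response that is not eligible for retry
                        System.out.println(response.getStatusLine());
                    }

                    @Override
                    public void onFailure(Exception exception) {
                        //invoked after all hosts failed or the max retry timeout elapsed
                        exception.printStackTrace();
                    }
                });
        //the client should be closed once all requests have completed (omitted in this sketch)
    }
}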
    private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
@ -247,38 +366,43 @@ public final class RestClient implements Closeable {
    }
    /**
     * Returns an {@link Iterable} of hosts to be used for a request call.
     * Ideally, the first host is retrieved from the iterable and used successfully for the request.
     * Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until
     * there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable.
     * The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be retried,
     * one dead host gets returned so that it can be retried.
     */
    private Iterable<HttpHost> nextHost() {
        Collection<HttpHost> nextHosts = Collections.emptySet();
        do {
            Set<HttpHost> filteredHosts = new HashSet<>(hosts);
            for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
                if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
                    filteredHosts.remove(entry.getKey());
                }
            }
            if (filteredHosts.isEmpty()) {
                //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
                List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
                if (sortedHosts.size() > 0) {
                    Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
                        @Override
                        public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
                            return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
                        }
                    });
                    HttpHost deadHost = sortedHosts.get(0).getKey();
                    logger.trace("resurrecting host [" + deadHost + "]");
                    nextHosts = Collections.singleton(deadHost);
                }
            } else {
                List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
                Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
                nextHosts = rotatedHosts;
            }
        } while(nextHosts.isEmpty());
        return nextHosts;
    }
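The round-robin behaviour comes from rotating a copy of the filtered host list by an ever-increasing counter. A standalone sketch of that idiom (class and host names are made up, this is not the client's code):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinSketch {
    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("host0", "host1", "host2");
        AtomicInteger lastHostIndex = new AtomicInteger();
        for (int call = 0; call < 3; call++) {
            List<String> rotated = new ArrayList<>(hosts);
            //each call shifts the starting host by one position
            Collections.rotate(rotated, rotated.size() - lastHostIndex.getAndIncrement());
            System.out.println(rotated);
            //prints [host0, host1, host2], then [host1, host2, host0], then [host2, host0, host1]
        }
    }
}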
    /**
@ -316,7 +440,21 @@ public final class RestClient implements Closeable {
        client.close();
    }

    private static boolean isSuccessfulResponse(String method, int statusCode) {
        return statusCode < 300 || (HttpHead.METHOD_NAME.equals(method) && statusCode == 404);
    }

    private static boolean isRetryStatus(int statusCode) {
        switch(statusCode) {
            case 502:
            case 503:
            case 504:
                return true;
        }
        return false;
    }

    private static Exception addSuppressedException(Exception suppressedException, Exception currentException) {
        if (suppressedException != null) {
            currentException.addSuppressed(suppressedException);
        }
@ -373,156 +511,114 @@ public final class RestClient implements Closeable {
    }

    /**
     * Listener used in any async call to wrap the provided user listener (or SyncResponseListener in sync calls).
     * Allows to track potential failures coming from the different retry attempts and returning to the original listener
     * only when we got a response (successful or not to be retried) or there are no hosts to retry against.
     */
    static class FailureTrackingResponseListener {
        private final ResponseListener responseListener;
        private volatile Exception exception;

        FailureTrackingResponseListener(ResponseListener responseListener) {
            this.responseListener = responseListener;
        }

        /**
         * Notifies the caller of a response through the wrapped listener
         */
        void onSuccess(Response response) {
            responseListener.onSuccess(response);
        }

        /**
         * Tracks one last definitive failure and returns to the caller by notifying the wrapped listener
         */
        void onDefinitiveFailure(Exception exception) {
            trackFailure(exception);
            responseListener.onFailure(this.exception);
        }

        /**
         * Tracks an exception, which caused a retry hence we should not return yet to the caller
         */
        void trackFailure(Exception exception) {
            this.exception = addSuppressedException(this.exception, exception);
        }
    }

    /**
     * Listener used in any sync performRequest calls, it waits for a response or an exception back up to a timeout
     */
    static class SyncResponseListener implements ResponseListener {
        private final CountDownLatch latch = new CountDownLatch(1);
        private final AtomicReference<Response> response = new AtomicReference<>();
        private final AtomicReference<Exception> exception = new AtomicReference<>();

        private final long timeout;

        SyncResponseListener(long timeout) {
            assert timeout > 0;
            this.timeout = timeout;
        }

        @Override
        public void onSuccess(Response response) {
            Objects.requireNonNull(response, "response must not be null");
            boolean wasResponseNull = this.response.compareAndSet(null, response);
            if (wasResponseNull == false) {
                throw new IllegalStateException("response is already set");
            }
            latch.countDown();
        }

        @Override
        public void onFailure(Exception exception) {
            Objects.requireNonNull(exception, "exception must not be null");
            boolean wasExceptionNull = this.exception.compareAndSet(null, exception);
            if (wasExceptionNull == false) {
                throw new IllegalStateException("exception is already set");
            }
            latch.countDown();
        }

        /**
         * Waits (up to a timeout) for some result of the request: either a response, or an exception.
         */
        Response get() throws IOException {
            try {
                //providing timeout is just a safety measure to prevent everlasting waits
                //the different client timeouts should already do their jobs
                if (latch.await(timeout, TimeUnit.MILLISECONDS) == false) {
                    throw new IOException("listener timeout after waiting for [" + timeout + "] ms");
                }
            } catch (InterruptedException e) {
                throw new RuntimeException("thread waiting for the response was interrupted", e);
            }

            Exception exception = this.exception.get();
            Response response = this.response.get();
            if (exception != null) {
                if (response != null) {
                    IllegalStateException e = new IllegalStateException("response and exception are unexpectedly set at the same time");
                    e.addSuppressed(exception);
                    throw e;
                }
                //try and leave the exception untouched as much as possible but we don't want to just add throws Exception clause everywhere
                if (exception instanceof IOException) {
                    throw (IOException) exception;
                }
                if (exception instanceof RuntimeException){
                    throw (RuntimeException) exception;
                }
                throw new RuntimeException("error while performing request", exception);
            }

            if (response == null) {
                throw new IllegalStateException("response not set and no exception caught either");
            }
            return response;
        }
    }
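The latch-based sync-over-async bridge used by SyncResponseListener, reduced to a standalone sketch; the worker thread stands in for the async http client, everything else is an illustrative assumption:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

public class SyncOverAsyncSketch {
    public static void main(String[] args) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> response = new AtomicReference<>();
        final AtomicReference<Exception> failure = new AtomicReference<>();

        //stand-in for the async client delivering its callback on another thread
        new Thread(new Runnable() {
            @Override
            public void run() {
                response.compareAndSet(null, "200 OK");
                latch.countDown();
            }
        }).start();

        //the synchronous caller blocks until the callback fires, with a safety timeout
        if (latch.await(10, TimeUnit.SECONDS) == false) {
            throw new RuntimeException("listener timeout");
        }
        if (failure.get() != null) {
            throw failure.get();
        }
        System.out.println(response.get());
    }
}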
    /**
@ -533,7 +629,7 @@ public final class RestClient implements Closeable {
        /**
         * Notifies that the host provided as argument has just failed
         */
        public void onFailure(HttpHost host) {
        }
    }
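A trivial FailureListener override, e.g. for logging dead hosts; the logging call is an illustrative assumption, not part of the commit:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class LoggingFailureListener extends RestClient.FailureListener {
    @Override
    public void onFailure(HttpHost host) {
        //hook invoked each time a host gets marked dead and scheduled for a later retry
        System.err.println("host marked dead: " + host);
    }
}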
View File
@ -0,0 +1,179 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.SchemeIOSessionStrategy;
import java.util.Objects;
/**
* Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally
* creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created
* {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed.
*/
public final class RestClientBuilder {
public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10;
public static final int DEFAULT_MAX_CONN_TOTAL = 30;
private static final Header[] EMPTY_HEADERS = new Header[0];
private final HttpHost[] hosts;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private RestClient.FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;
/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
*/
RestClientBuilder(HttpHost... hosts) {
if (hosts == null || hosts.length == 0) {
throw new IllegalArgumentException("no hosts provided");
}
for (HttpHost host : hosts) {
Objects.requireNonNull(host, "host cannot be null");
}
this.hosts = hosts;
}
/**
* Sets the default request headers, which will be sent along with each request
*/
public RestClientBuilder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
}
this.defaultHeaders = defaultHeaders;
return this;
}
/**
* Sets the {@link RestClient.FailureListener} to be notified for each request failure
*/
public RestClientBuilder setFailureListener(RestClient.FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failureListener must not be null");
this.failureListener = failureListener;
return this;
}
/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
*
* @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
*/
public RestClientBuilder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
if (maxRetryTimeoutMillis <= 0) {
throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0");
}
this.maxRetryTimeout = maxRetryTimeoutMillis;
return this;
}
/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
*/
public RestClientBuilder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
this.httpClientConfigCallback = httpClientConfigCallback;
return this;
}
/**
* Sets the {@link RequestConfigCallback} to be used to customize http client configuration
*/
public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
this.requestConfigCallback = requestConfigCallback;
return this;
}
/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (failureListener == null) {
failureListener = new RestClient.FailureListener();
}
CloseableHttpAsyncClient httpClient = createHttpClient();
RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
httpClient.start();
return restClient;
}
private CloseableHttpAsyncClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);
if (requestConfigCallback != null) {
requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build())
//default settings for connection pooling may be too constraining
.setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL);
if (httpClientConfigCallback != null) {
httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
return httpClientBuilder.build();
}
/**
 * Callback used to customize the default {@link RequestConfig} being set to the {@link CloseableHttpAsyncClient}
 * @see HttpAsyncClientBuilder#setDefaultRequestConfig
*/
public interface RequestConfigCallback {
/**
* Allows to customize the {@link RequestConfig} that will be used with each request.
* It is common to customize the different timeout values through this method without losing any other useful default
* value that the {@link RestClientBuilder} internally sets.
*/
RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder);
}
/**
 * Callback used to customize the {@link CloseableHttpAsyncClient} instance used by a {@link RestClient} instance.
 * Allows to customize the default {@link RequestConfig} being set to the client and any parameter that
 * can be set through {@link HttpAsyncClientBuilder}
*/
public interface HttpClientConfigCallback {
/**
* Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}.
* Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication
* or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default
* value that the {@link RestClientBuilder} internally sets, like connection pooling.
*/
HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder);
}
}
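A hedged example of wiring the new builder together; the timeout value is an arbitrary illustration and the callback merely tweaks one setting before echoing the builder back:

import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

public class BuilderUsageSketch {
    public static void main(String[] args) throws Exception {
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setMaxRetryTimeoutMillis(30000) //illustrative value
                .setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                    @Override
                    public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                        //tweak one timeout while keeping the builder's other defaults
                        return requestConfigBuilder.setSocketTimeout(30000);
                    }
                })
                .build();
        restClient.close();
    }
}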
View File
@ -1,53 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
/**
* Helps configuring the http client when needing to communicate over ssl. It effectively replaces the connection manager
* with one that has ssl properly configured thanks to the provided {@link SSLConnectionSocketFactory}.
*/
public class SSLSocketFactoryHttpConfigCallback implements RestClient.HttpClientConfigCallback {
private final SSLConnectionSocketFactory sslSocketFactory;
public SSLSocketFactoryHttpConfigCallback(SSLConnectionSocketFactory sslSocketFactory) {
this.sslSocketFactory = sslSocketFactory;
}
@Override
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.getSocketFactory())
.register("https", sslSocketFactory).build();
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);
httpClientBuilder.setConnectionManager(connectionManager);
}
}
View File
@ -0,0 +1,107 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
public class FailureTrackingResponseListenerTests extends RestClientTestCase {
public void testOnSuccess() {
MockResponseListener responseListener = new MockResponseListener();
RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);
}
public void testOnFailure() {
MockResponseListener responseListener = new MockResponseListener();
RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);
int numIters = randomIntBetween(1, 10);
Exception[] expectedExceptions = new Exception[numIters];
for (int i = 0; i < numIters; i++) {
RuntimeException runtimeException = new RuntimeException("test" + i);
expectedExceptions[i] = runtimeException;
listener.trackFailure(runtimeException);
assertNull(responseListener.response.get());
assertNull(responseListener.exception.get());
}
if (randomBoolean()) {
Response response = mockResponse();
listener.onSuccess(response);
assertSame(response, responseListener.response.get());
assertNull(responseListener.exception.get());
} else {
RuntimeException runtimeException = new RuntimeException("definitive");
listener.onDefinitiveFailure(runtimeException);
assertNull(responseListener.response.get());
Throwable exception = responseListener.exception.get();
assertSame(runtimeException, exception);
int i = numIters - 1;
do {
assertNotNull(exception.getSuppressed());
assertEquals(1, exception.getSuppressed().length);
assertSame(expectedExceptions[i--], exception.getSuppressed()[0]);
exception = exception.getSuppressed()[0];
} while(i >= 0);
}
}
private static class MockResponseListener implements ResponseListener {
private final AtomicReference<Response> response = new AtomicReference<>();
private final AtomicReference<Exception> exception = new AtomicReference<>();
@Override
public void onSuccess(Response response) {
if (this.response.compareAndSet(null, response) == false) {
throw new IllegalStateException("onSuccess was called multiple times");
}
}
@Override
public void onFailure(Exception exception) {
if (this.exception.compareAndSet(null, exception) == false) {
throw new IllegalStateException("onFailure was called multiple times");
}
}
}
private static Response mockResponse() {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}
}
View File
@ -0,0 +1,114 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.protocol.HttpContext;
import static org.elasticsearch.client.HeapBufferedAsyncResponseConsumer.DEFAULT_BUFFER_LIMIT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
//maximum buffer that this test ends up allocating is 50MB
private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024;
public void testResponseProcessing() throws Exception {
ContentDecoder contentDecoder = mock(ContentDecoder.class);
IOControl ioControl = mock(IOControl.class);
HttpContext httpContext = mock(HttpContext.class);
HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer());
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
httpResponse.setEntity(new StringEntity("test"));
//everything goes well
consumer.responseReceived(httpResponse);
consumer.consumeContent(contentDecoder, ioControl);
consumer.responseCompleted(httpContext);
verify(consumer).releaseResources();
verify(consumer).buildResult(httpContext);
assertTrue(consumer.isDone());
assertSame(httpResponse, consumer.getResult());
consumer.responseCompleted(httpContext);
verify(consumer, times(1)).releaseResources();
verify(consumer, times(1)).buildResult(httpContext);
}
public void testDefaultBufferLimit() throws Exception {
HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer();
bufferLimitTest(consumer, DEFAULT_BUFFER_LIMIT);
}
public void testConfiguredBufferLimit() throws Exception {
        try {
            new HeapBufferedAsyncResponseConsumer(randomIntBetween(Integer.MIN_VALUE, 0));
            fail("object creation should have failed");
        } catch(IllegalArgumentException e) {
            assertEquals("bufferLimit must be greater than 0", e.getMessage());
        }
        try {
            new HeapBufferedAsyncResponseConsumer(0);
            fail("object creation should have failed");
        } catch(IllegalArgumentException e) {
            assertEquals("bufferLimit must be greater than 0", e.getMessage());
        }
int bufferLimit = randomIntBetween(1, MAX_TEST_BUFFER_SIZE - 100);
HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(bufferLimit);
bufferLimitTest(consumer, bufferLimit);
}
private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
consumer.onResponseReceived(new BasicHttpResponse(statusLine));
BasicHttpEntity entity = new BasicHttpEntity();
entity.setContentLength(randomInt(bufferLimit));
consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
entity.setContentLength(randomIntBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
        try {
            consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
            fail("entity longer than the buffer limit should have been rejected");
        } catch(ContentTooLongException e) {
            assertEquals("entity content is too long [" + entity.getContentLength() +
                    "] for the configured buffer limit [" + bufferLimit + "]", e.getMessage());
        }
}
}
View File
@ -21,7 +21,6 @@ package org.elasticsearch.client;
import org.apache.http.HttpHost;

import java.util.HashSet;
import java.util.Set;
@ -30,13 +29,13 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;

/**
 * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called for which host.
 */
class HostsTrackingFailureListener extends RestClient.FailureListener {
    private volatile Set<HttpHost> hosts = new HashSet<>();

    @Override
    public void onFailure(HttpHost host) {
        hosts.add(host);
    }
View File
@ -35,6 +35,8 @@ import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;

import java.io.ByteArrayInputStream;
@ -97,14 +99,24 @@ public class RequestLoggerTests extends RestClientTestCase {
            expected += " -d '" + requestBody + "'";
            HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
            HttpEntity entity;
            switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) {
                case 0:
                    entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
                    break;
                case 1:
                    entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
                    break;
                case 2:
                    entity = new NStringEntity(requestBody, StandardCharsets.UTF_8);
                    break;
                case 3:
                    entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
                    break;
                default:
                    throw new UnsupportedOperationException();
            }
            enclosingRequest.setEntity(entity);
        }
        String traceRequest = RequestLogger.buildTraceRequest(request, host);
        assertThat(traceRequest, equalTo(expected));
        if (hasBody) {
View File
@ -23,7 +23,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;

import java.io.IOException;
@ -50,12 +50,16 @@ public class RestClientBuilderTests extends RestClientTestCase {
        }

        try {
            RestClient.builder(new HttpHost("localhost", 9200), null);
            fail("should have failed");
        } catch(NullPointerException e) {
            assertEquals("host cannot be null", e.getMessage());
        }

        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            assertNotNull(restClient);
        }

        try {
            RestClient.builder(new HttpHost("localhost", 9200))
                    .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
@ -104,18 +108,20 @@ public class RestClientBuilderTests extends RestClientTestCase {
        for (int i = 0; i < numNodes; i++) {
            hosts[i] = new HttpHost("localhost", 9200 + i);
        }
        RestClientBuilder builder = RestClient.builder(hosts);
        if (getRandom().nextBoolean()) {
            builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                @Override
                public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
                    return httpClientBuilder;
                }
            });
        }
        if (getRandom().nextBoolean()) {
            builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                @Override
                public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                    return requestConfigBuilder;
                }
            });
        }
View File
@ -27,6 +27,7 @@ import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
@ -47,6 +48,9 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
@ -140,10 +144,10 @@ public class RestClientIntegTests extends RestClientTestCase {
     * to set/add headers to the {@link org.apache.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever headers it received.
     */
    public void testHeaders() throws IOException {
        for (String method : getHttpMethods()) {
            Set<String> standardHeaders = new HashSet<>(
                    Arrays.asList("Connection", "Host", "User-agent", "Date"));
            if (method.equals("HEAD") == false) {
                standardHeaders.add("Content-length");
            }
@ -162,9 +166,9 @@ public class RestClientIntegTests extends RestClientTestCase {
            int statusCode = randomStatusCode(getRandom());
            Response esResponse;
            try {
                esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(),
                        (HttpEntity)null, headers);
            } catch(ResponseException e) {
                esResponse = e.getResponse();
            }
@ -188,7 +192,7 @@ public class RestClientIntegTests extends RestClientTestCase {
     * out of the box by {@link org.apache.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever body it received.
     */
    public void testDeleteWithBody() throws IOException {
        bodyTest("DELETE");
    }
@ -197,25 +201,74 @@ public class RestClientIntegTests extends RestClientTestCase {
     * out of the box by {@link org.apache.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever body it received.
     */
    public void testGetWithBody() throws IOException {
        bodyTest("GET");
    }

    private void bodyTest(String method) throws IOException {
        String requestBody = "{ \"field\": \"value\" }";
        StringEntity entity = new StringEntity(requestBody);
        int statusCode = randomStatusCode(getRandom());
        Response esResponse;
        try {
            esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), entity);
        } catch(ResponseException e) {
            esResponse = e.getResponse();
        }
        assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
        assertEquals(requestBody, EntityUtils.toString(esResponse.getEntity()));
    }

    public void testAsyncRequests() throws Exception {
        int numRequests = randomIntBetween(5, 20);
        final CountDownLatch latch = new CountDownLatch(numRequests);
        final List<TestResponse> responses = new CopyOnWriteArrayList<>();
        for (int i = 0; i < numRequests; i++) {
            final String method = RestClientTestUtil.randomHttpMethod(getRandom());
            final int statusCode = randomStatusCode(getRandom());
            restClient.performRequest(method, "/" + statusCode, new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    responses.add(new TestResponse(method, statusCode, response));
                    latch.countDown();
                }

                @Override
                public void onFailure(Exception exception) {
                    responses.add(new TestResponse(method, statusCode, exception));
                    latch.countDown();
                }
            });
        }
        assertTrue(latch.await(5, TimeUnit.SECONDS));

        assertEquals(numRequests, responses.size());
        for (TestResponse response : responses) {
            assertEquals(response.method, response.getResponse().getRequestLine().getMethod());
            assertEquals(response.statusCode, response.getResponse().getStatusLine().getStatusCode());
        }
    }

    private static class TestResponse {
        private final String method;
        private final int statusCode;
        private final Object response;

        TestResponse(String method, int statusCode, Object response) {
            this.method = method;
            this.statusCode = statusCode;
            this.response = response;
        }

        Response getResponse() {
            if (response instanceof Response) {
                return (Response) response;
            }
            if (response instanceof ResponseException) {
                return ((ResponseException) response).getResponse();
            }
            throw new AssertionError("unexpected response " + response.getClass());
        }
    }
}
View File
@ -22,14 +22,17 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header; import org.apache.http.Header;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.apache.http.HttpRequest; import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion; import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine; import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException; import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine; import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.junit.Before; import org.junit.Before;
import org.mockito.invocation.InvocationOnMock; import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer; import org.mockito.stubbing.Answer;
@ -39,6 +42,7 @@ import java.net.SocketTimeoutException;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
import java.util.Set; import java.util.Set;
import java.util.concurrent.Future;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode; import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode;
@ -62,57 +66,61 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
private RestClient restClient; private RestClient restClient;
private HttpHost[] httpHosts; private HttpHost[] httpHosts;
private TrackingFailureListener failureListener; private HostsTrackingFailureListener failureListener;
@Before @Before
@SuppressWarnings("unchecked")
public void createRestClient() throws IOException { public void createRestClient() throws IOException {
CloseableHttpClient httpClient = mock(CloseableHttpClient.class); CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() { when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
@Override @Override
public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable { public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpHost httpHost = (HttpHost) invocationOnMock.getArguments()[0]; HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1]; HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
HttpHost httpHost = requestProducer.getTarget();
FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2];
//return the desired status code or exception depending on the path //return the desired status code or exception depending on the path
if (request.getURI().getPath().equals("/soe")) { if (request.getURI().getPath().equals("/soe")) {
throw new SocketTimeoutException(httpHost.toString()); futureCallback.failed(new SocketTimeoutException(httpHost.toString()));
} else if (request.getURI().getPath().equals("/coe")) { } else if (request.getURI().getPath().equals("/coe")) {
throw new ConnectTimeoutException(httpHost.toString()); futureCallback.failed(new ConnectTimeoutException(httpHost.toString()));
} else if (request.getURI().getPath().equals("/ioe")) { } else if (request.getURI().getPath().equals("/ioe")) {
-                    throw new IOException(httpHost.toString());
+                    futureCallback.failed(new IOException(httpHost.toString()));
+                } else {
+                    int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
+                    StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
+                    futureCallback.completed(new BasicHttpResponse(statusLine));
                 }
-                int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
-                StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
-                return new CloseableBasicHttpResponse(statusLine);
+                return null;
             }
         });
         int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5);
         httpHosts = new HttpHost[numHosts];
         for (int i = 0; i < numHosts; i++) {
             httpHosts[i] = new HttpHost("localhost", 9200 + i);
         }
-        failureListener = new TrackingFailureListener();
+        failureListener = new HostsTrackingFailureListener();
         restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
     }

-    public void testRoundRobinOkStatusCodes() throws Exception {
+    public void testRoundRobinOkStatusCodes() throws IOException {
         int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
         for (int i = 0; i < numIters; i++) {
             Set<HttpHost> hostsSet = new HashSet<>();
             Collections.addAll(hostsSet, httpHosts);
             for (int j = 0; j < httpHosts.length; j++) {
                 int statusCode = randomOkStatusCode(getRandom());
-                try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
-                    assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+                Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
+                assertEquals(statusCode, response.getStatusLine().getStatusCode());
                 assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
-                }
             }
             assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
         }
         failureListener.assertNotCalled();
     }

-    public void testRoundRobinNoRetryErrors() throws Exception {
+    public void testRoundRobinNoRetryErrors() throws IOException {
         int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
         for (int i = 0; i < numIters; i++) {
             Set<HttpHost> hostsSet = new HashSet<>();
@@ -120,11 +128,12 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
             for (int j = 0; j < httpHosts.length; j++) {
                 String method = randomHttpMethod(getRandom());
                 int statusCode = randomErrorNoRetryStatusCode(getRandom());
-                try (Response response = restClient.performRequest(method, "/" + statusCode)) {
+                try {
+                    Response response = restClient.performRequest(method, "/" + statusCode);
                     if (method.equals("HEAD") && statusCode == 404) {
                         //no exception gets thrown although we got a 404
-                        assertThat(response.getStatusLine().getStatusCode(), equalTo(404));
-                        assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+                        assertEquals(404, response.getStatusLine().getStatusCode());
+                        assertEquals(statusCode, response.getStatusLine().getStatusCode());
                         assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
                     } else {
                         fail("request should have failed");
@@ -134,7 +143,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
                         throw e;
                     }
                     Response response = e.getResponse();
-                    assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
+                    assertEquals(statusCode, response.getStatusLine().getStatusCode());
                     assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
                     assertEquals(0, e.getSuppressed().length);
                 }
@@ -144,7 +153,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
         failureListener.assertNotCalled();
     }

-    public void testRoundRobinRetryErrors() throws Exception {
+    public void testRoundRobinRetryErrors() throws IOException {
         String retryEndpoint = randomErrorRetryEndpoint();
         try {
             restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
@@ -156,7 +165,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
             failureListener.assertCalled(httpHosts);
             do {
                 Response response = e.getResponse();
-                assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
+                assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode());
                 assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times",
                         hostsSet.remove(response.getHost()));
                 if (e.getSuppressed().length > 0) {
@@ -223,8 +232,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
             for (int y = 0; y < iters; y++) {
                 int statusCode = randomErrorNoRetryStatusCode(getRandom());
                 Response response;
-                try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
-                    response = esResponse;
+                try {
+                    response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
                 }
                 catch(ResponseException e) {
                     response = e.getResponse();
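The round-robin assertions in this file reduce to one invariant: across a pass of as many requests as there are hosts, every configured host must be used exactly once. A minimal standalone sketch of that invariant check (the host list and the rotation are simulated here and purely illustrative, not taken from the client):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class RoundRobinCheckSketch {
    public static void main(String[] args) {
        List<String> hosts = Arrays.asList("localhost:9200", "localhost:9201", "localhost:9202");
        Set<String> notYetUsed = new HashSet<>(hosts);
        for (int i = 0; i < hosts.size(); i++) {
            //stands in for whichever host the client picked for request i
            String used = hosts.get(i % hosts.size());
            if (notYetUsed.remove(used) == false) {
                throw new AssertionError("host used more than once in a round: " + used);
            }
        }
        if (notYetUsed.isEmpty() == false) {
            throw new AssertionError("every host should have been used but some weren't: " + notYetUsed);
        }
    }
}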
@@ -26,9 +26,9 @@ import org.apache.http.HttpEntity;
 import org.apache.http.HttpEntityEnclosingRequest;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpRequest;
+import org.apache.http.HttpResponse;
 import org.apache.http.ProtocolVersion;
 import org.apache.http.StatusLine;
-import org.apache.http.client.methods.CloseableHttpResponse;
 import org.apache.http.client.methods.HttpHead;
 import org.apache.http.client.methods.HttpOptions;
 import org.apache.http.client.methods.HttpPatch;
@@ -37,11 +37,15 @@ import org.apache.http.client.methods.HttpPut;
 import org.apache.http.client.methods.HttpTrace;
 import org.apache.http.client.methods.HttpUriRequest;
 import org.apache.http.client.utils.URIBuilder;
+import org.apache.http.concurrent.FutureCallback;
 import org.apache.http.conn.ConnectTimeoutException;
 import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
 import org.apache.http.message.BasicHeader;
+import org.apache.http.message.BasicHttpResponse;
 import org.apache.http.message.BasicStatusLine;
+import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
+import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
 import org.apache.http.util.EntityUtils;
 import org.junit.Before;
 import org.mockito.ArgumentCaptor;
@@ -51,11 +55,11 @@ import org.mockito.stubbing.Answer;
 import java.io.IOException;
 import java.net.SocketTimeoutException;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.Future;

 import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
 import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
@@ -86,39 +90,48 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     private RestClient restClient;
     private Header[] defaultHeaders;
     private HttpHost httpHost;
-    private CloseableHttpClient httpClient;
-    private TrackingFailureListener failureListener;
+    private CloseableHttpAsyncClient httpClient;
+    private HostsTrackingFailureListener failureListener;

     @Before
+    @SuppressWarnings("unchecked")
     public void createRestClient() throws IOException {
-        httpClient = mock(CloseableHttpClient.class);
-        when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() {
+        httpClient = mock(CloseableHttpAsyncClient.class);
+        when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
+                any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
             @Override
-            public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
-                HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1];
+            public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
+                HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
+                FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2];
+                HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
                 //return the desired status code or exception depending on the path
                 if (request.getURI().getPath().equals("/soe")) {
-                    throw new SocketTimeoutException();
+                    futureCallback.failed(new SocketTimeoutException());
                 } else if (request.getURI().getPath().equals("/coe")) {
-                    throw new ConnectTimeoutException();
-                }
-                int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
-                StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
-                CloseableHttpResponse httpResponse = new CloseableBasicHttpResponse(statusLine);
-                //return the same body that was sent
-                if (request instanceof HttpEntityEnclosingRequest) {
-                    HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
-                    if (entity != null) {
-                        assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable());
-                        httpResponse.setEntity(entity);
+                    futureCallback.failed(new ConnectTimeoutException());
+                } else {
+                    int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
+                    StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
+                    HttpResponse httpResponse = new BasicHttpResponse(statusLine);
+                    //return the same body that was sent
+                    if (request instanceof HttpEntityEnclosingRequest) {
+                        HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
+                        if (entity != null) {
+                            assertTrue("the entity is not repeatable, cannot set it to the response directly",
+                                    entity.isRepeatable());
+                            httpResponse.setEntity(entity);
+                        }
                     }
+                    //return the same headers that were sent
+                    httpResponse.setHeaders(request.getAllHeaders());
+                    futureCallback.completed(httpResponse);
                 }
-                //return the same headers that were sent
-                httpResponse.setHeaders(request.getAllHeaders());
-                return httpResponse;
+                return null;
             }
         });
         int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
         defaultHeaders = new Header[numHeaders];
         for (int i = 0; i < numHeaders; i++) {
@@ -127,20 +140,22 @@ public class RestClientSingleHostTests extends RestClientTestCase {
             defaultHeaders[i] = new BasicHeader(headerName, headerValue);
         }
         httpHost = new HttpHost("localhost", 9200);
-        failureListener = new TrackingFailureListener();
+        failureListener = new HostsTrackingFailureListener();
         restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
     }

     /**
      * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client
      */
+    @SuppressWarnings("unchecked")
     public void testInternalHttpRequest() throws Exception {
-        ArgumentCaptor<HttpUriRequest> requestArgumentCaptor = ArgumentCaptor.forClass(HttpUriRequest.class);
+        ArgumentCaptor<HttpAsyncRequestProducer> requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class);
         int times = 0;
         for (String httpMethod : getHttpMethods()) {
             HttpUriRequest expectedRequest = performRandomRequest(httpMethod);
-            verify(httpClient, times(++times)).execute(any(HttpHost.class), requestArgumentCaptor.capture());
-            HttpUriRequest actualRequest = requestArgumentCaptor.getValue();
+            verify(httpClient, times(++times)).<HttpResponse>execute(requestArgumentCaptor.capture(),
+                    any(HttpAsyncResponseConsumer.class), any(FutureCallback.class));
+            HttpUriRequest actualRequest = (HttpUriRequest)requestArgumentCaptor.getValue().generateRequest();
             assertEquals(expectedRequest.getURI(), actualRequest.getURI());
             assertEquals(expectedRequest.getClass(), actualRequest.getClass());
             assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders());
@@ -184,7 +199,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     /**
      * End to end test for ok status codes
      */
-    public void testOkStatusCodes() throws Exception {
+    public void testOkStatusCodes() throws IOException {
         for (String method : getHttpMethods()) {
             for (int okStatusCode : getOkStatusCodes()) {
                 Response response = performRequest(method, "/" + okStatusCode);
@@ -197,11 +212,12 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     /**
      * End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests
      */
-    public void testErrorStatusCodes() throws Exception {
+    public void testErrorStatusCodes() throws IOException {
         for (String method : getHttpMethods()) {
             //error status codes should cause an exception to be thrown
             for (int errorStatusCode : getAllErrorStatusCodes()) {
-                try (Response response = performRequest(method, "/" + errorStatusCode)) {
+                try {
+                    Response response = performRequest(method, "/" + errorStatusCode);
                     if (method.equals("HEAD") && errorStatusCode == 404) {
                         //no exception gets thrown although we got a 404
                         assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
@@ -247,16 +263,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
      * End to end test for request and response body. Exercises the mock http client ability to send back
      * whatever body it has received.
      */
-    public void testBody() throws Exception {
+    public void testBody() throws IOException {
         String body = "{ \"field\": \"value\" }";
         StringEntity entity = new StringEntity(body);
         for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
             for (int okStatusCode : getOkStatusCodes()) {
-                try (Response response = restClient.performRequest(method, "/" + okStatusCode,
-                        Collections.<String, String>emptyMap(), entity)) {
-                    assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
-                    assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
-                }
+                Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity);
+                assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
+                assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
             }
             for (int errorStatusCode : getAllErrorStatusCodes()) {
                 try {
@@ -279,7 +293,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         }
     }

-    public void testNullHeaders() throws Exception {
+    public void testNullHeaders() throws IOException {
         String method = randomHttpMethod(getRandom());
         int statusCode = randomStatusCode(getRandom());
         try {
@@ -296,7 +310,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         }
     }

-    public void testNullParams() throws Exception {
+    public void testNullParams() throws IOException {
         String method = randomHttpMethod(getRandom());
         int statusCode = randomStatusCode(getRandom());
         try {
@@ -317,7 +331,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     /**
      * End to end test for request and response headers. Exercises the mock http client ability to send back
      * whatever headers it has received.
      */
-    public void testHeaders() throws Exception {
+    public void testHeaders() throws IOException {
         for (String method : getHttpMethods()) {
             Map<String, String> expectedHeaders = new HashMap<>();
             for (Header defaultHeader : defaultHeaders) {
@@ -334,9 +348,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
             int statusCode = randomStatusCode(getRandom());
             Response esResponse;
-            try (Response response = restClient.performRequest(method, "/" + statusCode,
-                    Collections.<String, String>emptyMap(), null, headers)) {
-                esResponse = response;
+            try {
+                esResponse = restClient.performRequest(method, "/" + statusCode, headers);
             } catch(ResponseException e) {
                 esResponse = e.getResponse();
             }
@@ -349,7 +362,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         }
     }

-    private HttpUriRequest performRandomRequest(String method) throws IOException, URISyntaxException {
+    private HttpUriRequest performRandomRequest(String method) throws Exception {
         String uriAsString = "/" + randomStatusCode(getRandom());
         URIBuilder uriBuilder = new URIBuilder(uriAsString);
         Map<String, String> params = Collections.emptyMap();
@@ -441,7 +454,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
             case 1:
                 return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
             case 2:
-                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
+                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
             default:
                 throw new UnsupportedOperationException();
         }
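The heart of this change is how the test double works: instead of stubbing a blocking CloseableHttpClient.execute call, the test stubs the async execute overload and completes (or fails) the supplied FutureCallback inline, so no real I/O or threads are involved. A minimal self-contained sketch of that stubbing technique, assuming the Mockito 1.x matchers and Apache httpasyncclient API these tests use (the class and method names below are illustrative, not part of the change):

import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

import java.util.concurrent.Future;

public class AsyncClientMockSketch {
    @SuppressWarnings("unchecked")
    static CloseableHttpAsyncClient clientAlwaysReturning(final int statusCode) {
        CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class);
        when(client.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
                any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
            @Override
            public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) {
                FutureCallback<HttpResponse> futureCallback =
                        (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2];
                //complete the callback inline: the caller observes an already-finished "async" round trip
                futureCallback.completed(new BasicHttpResponse(
                        new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "")));
                return null;
            }
        });
        return client;
    }
}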
@@ -0,0 +1,172 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;

import java.io.IOException;
import java.net.URISyntaxException;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;

public class SyncResponseListenerTests extends RestClientTestCase {

    public void testOnSuccessNullResponse() {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        try {
            syncResponseListener.onSuccess(null);
            fail("onSuccess should have failed");
        } catch(NullPointerException e) {
            assertEquals("response must not be null", e.getMessage());
        }
    }

    public void testOnFailureNullException() {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        try {
            syncResponseListener.onFailure(null);
            fail("onFailure should have failed");
        } catch(NullPointerException e) {
            assertEquals("exception must not be null", e.getMessage());
        }
    }

    public void testOnSuccess() throws Exception {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        Response mockResponse = mockResponse();
        syncResponseListener.onSuccess(mockResponse);
        Response response = syncResponseListener.get();
        assertSame(response, mockResponse);

        try {
            syncResponseListener.onSuccess(mockResponse);
            fail("onSuccess should have failed");
        } catch(IllegalStateException e) {
            assertEquals("response is already set", e.getMessage());
        }
        response = syncResponseListener.get();
        assertSame(response, mockResponse);

        RuntimeException runtimeException = new RuntimeException("test");
        syncResponseListener.onFailure(runtimeException);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(IllegalStateException e) {
            assertEquals("response and exception are unexpectedly set at the same time", e.getMessage());
            assertNotNull(e.getSuppressed());
            assertEquals(1, e.getSuppressed().length);
            assertSame(runtimeException, e.getSuppressed()[0]);
        }
    }

    public void testOnFailure() throws Exception {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        RuntimeException firstException = new RuntimeException("first-test");
        syncResponseListener.onFailure(firstException);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(RuntimeException e) {
            assertSame(firstException, e);
        }

        RuntimeException secondException = new RuntimeException("second-test");
        try {
            syncResponseListener.onFailure(secondException);
            fail("onFailure should have failed");
        } catch(IllegalStateException e) {
            assertEquals("exception is already set", e.getMessage());
        }
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(RuntimeException e) {
            assertSame(firstException, e);
        }

        Response response = mockResponse();
        syncResponseListener.onSuccess(response);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(IllegalStateException e) {
            assertEquals("response and exception are unexpectedly set at the same time", e.getMessage());
            assertNotNull(e.getSuppressed());
            assertEquals(1, e.getSuppressed().length);
            assertSame(firstException, e.getSuppressed()[0]);
        }
    }

    public void testRuntimeExceptionIsNotWrapped() throws Exception {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        RuntimeException runtimeException = new RuntimeException();
        syncResponseListener.onFailure(runtimeException);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(RuntimeException e) {
            assertSame(runtimeException, e);
        }
    }

    public void testIOExceptionIsNotWrapped() throws Exception {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        IOException ioException = new IOException();
        syncResponseListener.onFailure(ioException);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(IOException e) {
            assertSame(ioException, e);
        }
    }

    public void testExceptionIsWrapped() throws Exception {
        RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
        //we just need any checked exception
        URISyntaxException exception = new URISyntaxException("test", "test");
        syncResponseListener.onFailure(exception);
        try {
            syncResponseListener.get();
            fail("get should have failed");
        } catch(RuntimeException e) {
            assertEquals("error while performing request", e.getMessage());
            assertSame(exception, e.getCause());
        }
    }

    private static Response mockResponse() {
        ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
        RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
        StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
        HttpResponse httpResponse = new BasicHttpResponse(statusLine);
        return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
    }
}
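These tests pin down the contract of RestClient.SyncResponseListener without showing its implementation: a response or an exception may be set exactly once, get() returns the response or rethrows the exception, RuntimeException and IOException propagate as-is, and any other checked exception is wrapped in a RuntimeException. A minimal sketch honoring that contract (latch-based, with the constructor's timeout handling omitted; the real class may differ in details, and the Object response type is a stand-in):

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

class SyncListenerContractSketch {
    private final CountDownLatch latch = new CountDownLatch(1);
    private volatile Object response;   //stands in for org.elasticsearch.client.Response
    private volatile Exception exception;

    void onSuccess(Object response) {
        if (response == null) throw new NullPointerException("response must not be null");
        if (this.response != null) throw new IllegalStateException("response is already set");
        this.response = response;
        latch.countDown();
    }

    void onFailure(Exception exception) {
        if (exception == null) throw new NullPointerException("exception must not be null");
        if (this.exception != null) throw new IllegalStateException("exception is already set");
        this.exception = exception;
        latch.countDown();
    }

    Object get() throws IOException {
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new RuntimeException("thread waiting for the response was interrupted", e);
        }
        if (response != null && exception != null) {
            IllegalStateException e =
                    new IllegalStateException("response and exception are unexpectedly set at the same time");
            e.addSuppressed(exception);
            throw e;
        }
        if (response != null) return response;
        //unwrap per the tests above: RuntimeException and IOException propagate as-is,
        //any other checked exception is wrapped in a RuntimeException
        if (exception instanceof RuntimeException) throw (RuntimeException) exception;
        if (exception instanceof IOException) throw (IOException) exception;
        throw new RuntimeException("error while performing request", exception);
    }
}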
@@ -58,6 +58,11 @@ forbiddenApisTest {
     signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
 }

+dependencyLicenses {
+    mapping from: /http.*/, to: 'httpclient'
+    mapping from: /commons-.*/, to: 'commons'
+}
+
 //JarHell is part of es core, which we don't want to pull in
 jarHell.enabled=false
@@ -1,6 +0,0 @@
Apache Commons Logging
Copyright 2003-2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
@@ -1 +0,0 @@
b31526a230871fbe285fbcbe2813f9c0839ae9b0
@@ -0,0 +1 @@
e7501a1b34325abb00d17dde96150604a0658b54
@@ -1,558 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
=========================================================================
This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0
Full license text: <http://mozilla.org/MPL/2.0/>
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
@@ -1,6 +0,0 @@
Apache HttpComponents Client
Copyright 1999-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).
@ -0,0 +1,170 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.sniff;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back.
* Compatible with elasticsearch 5.x and 2.x.
*/
public final class ElasticsearchHostsSniffer implements HostsSniffer {
private static final Log logger = LogFactory.getLog(ElasticsearchHostsSniffer.class);
public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
private final RestClient restClient;
private final Map<String, String> sniffRequestParams;
private final Scheme scheme;
private final JsonFactory jsonFactory = new JsonFactory();
/**
* Creates a new instance of the Elasticsearch sniffer. It will use the provided {@link RestClient} to fetch the hosts,
* through the nodes info api, the default sniff request timeout value {@link #DEFAULT_SNIFF_REQUEST_TIMEOUT} and http
* as the scheme for all the hosts.
* @param restClient client used to fetch the hosts from elasticsearch through nodes info api. Usually the same instance
* that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same
* client that was used to fetch them.
*/
public ElasticsearchHostsSniffer(RestClient restClient) {
this(restClient, DEFAULT_SNIFF_REQUEST_TIMEOUT, ElasticsearchHostsSniffer.Scheme.HTTP);
}
/**
* Creates a new instance of the Elasticsearch sniffer. It will use the provided {@link RestClient} to fetch the hosts
* through the nodes info api, the provided sniff request timeout value and scheme.
* @param restClient client used to fetch the hosts from elasticsearch through nodes info api. Usually the same instance
* that is also provided to {@link Sniffer#builder(RestClient)}, so that the hosts are set to the same
* client that was used to sniff them.
* @param sniffRequestTimeoutMillis the sniff request timeout (in milliseconds) to be passed in as a query string parameter
* to elasticsearch. Allows to halt the request without any failure, as only the nodes
* that have responded within this timeout will be returned.
* @param scheme the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
*/
public ElasticsearchHostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
this.restClient = Objects.requireNonNull(restClient, "restClient cannot be null");
if (sniffRequestTimeoutMillis < 0) {
throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
}
this.sniffRequestParams = Collections.<String, String>singletonMap("timeout", sniffRequestTimeoutMillis + "ms");
this.scheme = Objects.requireNonNull(scheme, "scheme cannot be null");
}
/**
* Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
*/
public List<HttpHost> sniffHosts() throws IOException {
Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams);
return readHosts(response.getEntity());
}
private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
try (InputStream inputStream = entity.getContent()) {
JsonParser parser = jsonFactory.createParser(inputStream);
if (parser.nextToken() != JsonToken.START_OBJECT) {
throw new IOException("expected data to start with an object");
}
List<HttpHost> hosts = new ArrayList<>();
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
if ("nodes".equals(parser.getCurrentName())) {
while (parser.nextToken() != JsonToken.END_OBJECT) {
JsonToken token = parser.nextToken();
assert token == JsonToken.START_OBJECT;
String nodeId = parser.getCurrentName();
HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
if (sniffedHost != null) {
logger.trace("adding node [" + nodeId + "]");
hosts.add(sniffedHost);
}
}
} else {
parser.skipChildren();
}
}
}
return hosts;
}
}
private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
HttpHost httpHost = null;
String fieldName = null;
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
fieldName = parser.getCurrentName();
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
if ("http".equals(fieldName)) {
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
boundAddressAsURI.getScheme());
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
parser.skipChildren();
}
}
} else {
parser.skipChildren();
}
}
}
//http section is not present if http is not enabled on the node, ignore such nodes
if (httpHost == null) {
logger.debug("skipping node [" + nodeId + "] with http disabled");
return null;
}
return httpHost;
}
public enum Scheme {
HTTP("http"), HTTPS("https");
private final String name;
Scheme(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
}
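
A minimal usage sketch of the class above; the localhost address, the one-second timeout, and the HTTP scheme are illustrative assumptions, not values mandated by this change:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
import java.util.List;

public class HostsSnifferExample {
    public static void main(String[] args) throws Exception {
        // assumes an elasticsearch node is reachable at localhost:9200
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            ElasticsearchHostsSniffer sniffer =
                    new ElasticsearchHostsSniffer(restClient, 1000, ElasticsearchHostsSniffer.Scheme.HTTP);
            // one blocking call to /_nodes/http, parsed into a host list
            List<HttpHost> hosts = sniffer.sniffHosts();
            for (HttpHost host : hosts) {
                System.out.println("sniffed host: " + host);
            }
        }
    }
}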

View File

@ -19,176 +19,17 @@
package org.elasticsearch.client.sniff; package org.elasticsearch.client.sniff;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import java.io.IOException; import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/** /**
* Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. * Responsible for sniffing the http hosts
* Compatible with elasticsearch 5.x and 2.x.
*/ */
public class HostsSniffer { public interface HostsSniffer {
private static final Log logger = LogFactory.getLog(HostsSniffer.class);
private final RestClient restClient;
private final Map<String, String> sniffRequestParams;
private final Scheme scheme;
private final JsonFactory jsonFactory = new JsonFactory();
protected HostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) {
this.restClient = restClient;
this.sniffRequestParams = Collections.<String, String>singletonMap("timeout", sniffRequestTimeoutMillis + "ms");
this.scheme = scheme;
}
/** /**
* Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts * Returns the sniffed http hosts
*/ */
public List<HttpHost> sniffHosts() throws IOException { List<HttpHost> sniffHosts() throws IOException;
try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams)) {
return readHosts(response.getEntity());
}
}
private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
try (InputStream inputStream = entity.getContent()) {
JsonParser parser = jsonFactory.createParser(inputStream);
if (parser.nextToken() != JsonToken.START_OBJECT) {
throw new IOException("expected data to start with an object");
}
List<HttpHost> hosts = new ArrayList<>();
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
if ("nodes".equals(parser.getCurrentName())) {
while (parser.nextToken() != JsonToken.END_OBJECT) {
JsonToken token = parser.nextToken();
assert token == JsonToken.START_OBJECT;
String nodeId = parser.getCurrentName();
HttpHost sniffedHost = readHost(nodeId, parser, this.scheme);
if (sniffedHost != null) {
logger.trace("adding node [" + nodeId + "]");
hosts.add(sniffedHost);
}
}
} else {
parser.skipChildren();
}
}
}
return hosts;
}
}
private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException {
HttpHost httpHost = null;
String fieldName = null;
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.FIELD_NAME) {
fieldName = parser.getCurrentName();
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
if ("http".equals(fieldName)) {
while (parser.nextToken() != JsonToken.END_OBJECT) {
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) {
URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString());
httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(),
boundAddressAsURI.getScheme());
} else if (parser.getCurrentToken() == JsonToken.START_OBJECT) {
parser.skipChildren();
}
}
} else {
parser.skipChildren();
}
}
}
//http section is not present if http is not enabled on the node, ignore such nodes
if (httpHost == null) {
logger.debug("skipping node [" + nodeId + "] with http disabled");
return null;
}
return httpHost;
}
/**
* Returns a new {@link Builder} to help with {@link HostsSniffer} creation.
*/
public static Builder builder(RestClient restClient) {
return new Builder(restClient);
}
public enum Scheme {
HTTP("http"), HTTPS("https");
private final String name;
Scheme(String name) {
this.name = name;
}
@Override
public String toString() {
return name;
}
}
/**
* HostsSniffer builder. Helps creating a new {@link HostsSniffer}.
*/
public static class Builder {
public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1);
private final RestClient restClient;
private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
private Scheme scheme = Scheme.HTTP;
private Builder(RestClient restClient) {
Objects.requireNonNull(restClient, "restClient cannot be null");
this.restClient = restClient;
}
/**
* Sets the sniff request timeout (in milliseconds) to be passed in as a query string parameter to elasticsearch.
* Allows to halt the request without any failure, as only the nodes that have responded within this timeout will be returned.
*/
public Builder setSniffRequestTimeoutMillis(int sniffRequestTimeoutMillis) {
if (sniffRequestTimeoutMillis <= 0) {
throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0");
}
this.sniffRequestTimeoutMillis = sniffRequestTimeoutMillis;
return this;
}
/**
* Sets the scheme to associate sniffed nodes with (as it is not returned by elasticsearch)
*/
public Builder setScheme(Scheme scheme) {
Objects.requireNonNull(scheme, "scheme cannot be null");
this.scheme = scheme;
return this;
}
/**
* Creates a new {@link HostsSniffer} instance given the provided configuration
*/
public HostsSniffer build() {
return new HostsSniffer(restClient, sniffRequestTimeoutMillis, scheme);
}
}
} }
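
The refactoring above turns HostsSniffer into a single-method interface, so hosts can come from sources other than the nodes info api. A hedged sketch of a custom implementation; the fixed host names are made up for illustration:

import org.apache.http.HttpHost;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;

class StaticHostsSniffer implements HostsSniffer {
    // a hard-coded host list stands in for e.g. a config file or a service registry
    private final List<HttpHost> hosts = Arrays.asList(
            new HttpHost("node-1", 9200),
            new HttpHost("node-2", 9200));

    @Override
    public List<HttpHost> sniffHosts() throws IOException {
        return hosts;
    }
}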

View File

@ -22,7 +22,6 @@ package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import java.io.IOException;
import java.util.Objects; import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
@ -55,7 +54,7 @@ public class SniffOnFailureListener extends RestClient.FailureListener {
} }
@Override @Override
public void onFailure(HttpHost host) throws IOException { public void onFailure(HttpHost host) {
if (sniffer == null) { if (sniffer == null) {
throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
} }

View File

@ -23,11 +23,11 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import java.io.Closeable; import java.io.Closeable;
import java.io.IOException; import java.io.IOException;
import java.util.List; import java.util.List;
import java.util.Objects;
import java.util.concurrent.Executors; import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture; import java.util.concurrent.ScheduledFuture;
@ -35,12 +35,12 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
/** /**
* Class responsible for sniffing nodes from an elasticsearch cluster and setting them to a provided instance of {@link RestClient}. * Class responsible for sniffing nodes from some source (default is elasticsearch itself) and setting them to a provided instance of
* Must be created via {@link Builder}, which allows to set all of the different options or rely on defaults. * {@link RestClient}. Must be created via {@link SnifferBuilder}, which allows to set all of the different options or rely on defaults.
* A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance. * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance.
* It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
* {@link org.elasticsearch.client.RestClient.Builder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation * {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation needs to be lazily set to the
* needs to be lazily set to the previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. * previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
*/ */
public final class Sniffer implements Closeable { public final class Sniffer implements Closeable {
@ -48,7 +48,7 @@ public final class Sniffer implements Closeable {
private final Task task; private final Task task;
private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) {
this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay);
} }
@ -143,64 +143,12 @@ public final class Sniffer implements Closeable {
} }
/** /**
* Returns a new {@link Builder} to help with {@link Sniffer} creation. * Returns a new {@link SnifferBuilder} to help with {@link Sniffer} creation.
*
* @param restClient the client that gets its hosts set (via {@link RestClient#setHosts(HttpHost...)}) once they are fetched
* @return a new instance of {@link SnifferBuilder}
*/ */
public static Builder builder(RestClient restClient, HostsSniffer hostsSniffer) { public static SnifferBuilder builder(RestClient restClient) {
return new Builder(restClient, hostsSniffer); return new SnifferBuilder(restClient);
}
/**
* Sniffer builder. Helps creating a new {@link Sniffer}.
*/
public static final class Builder {
public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5);
public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1);
private final RestClient restClient;
private final HostsSniffer hostsSniffer;
private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
/**
* Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch,
* and the {@link HostsSniffer} that will be used to fetch the hosts.
*/
private Builder(RestClient restClient, HostsSniffer hostsSniffer) {
Objects.requireNonNull(restClient, "restClient cannot be null");
this.restClient = restClient;
Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
this.hostsSniffer = hostsSniffer;
}
/**
* Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when
* sniffOnFailure is disabled or when there are no failures between consecutive sniff executions.
* @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0
*/
public Builder setSniffIntervalMillis(int sniffIntervalMillis) {
if (sniffIntervalMillis <= 0) {
throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0");
}
this.sniffIntervalMillis = sniffIntervalMillis;
return this;
}
/**
* Sets the delay of a sniff execution scheduled after a failure (in milliseconds)
*/
public Builder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) {
if (sniffAfterFailureDelayMillis <= 0) {
throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0");
}
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
return this;
}
/**
* Creates the {@link Sniffer} based on the provided configuration.
*/
public Sniffer build() {
return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
}
} }
} }
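
The updated javadoc above describes the sniff-on-failure wiring; a sketch of what that looks like in client code, with an illustrative host and an arbitrary 30-second delay:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;

public class SniffOnFailureExample {
    public static void main(String[] args) throws Exception {
        SniffOnFailureListener listener = new SniffOnFailureListener();
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setFailureListener(listener) // hook the listener up while building the client
                .build();
        Sniffer sniffer = Sniffer.builder(restClient)
                .setSniffAfterFailureDelayMillis(30000) // example value
                .build();
        listener.setSniffer(sniffer); // must be set lazily, once the Sniffer exists
    }
}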

View File

@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.sniff;
import org.elasticsearch.client.RestClient;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
/**
* Sniffer builder. Helps creating a new {@link Sniffer}.
*/
public final class SnifferBuilder {
public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5);
public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1);
private final RestClient restClient;
private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL;
private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY;
private HostsSniffer hostsSniffer;
/**
* Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch
*/
SnifferBuilder(RestClient restClient) {
Objects.requireNonNull(restClient, "restClient cannot be null");
this.restClient = restClient;
}
/**
* Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when
* sniffOnFailure is disabled or when there are no failures between consecutive sniff executions.
* @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0
*/
public SnifferBuilder setSniffIntervalMillis(int sniffIntervalMillis) {
if (sniffIntervalMillis <= 0) {
throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0");
}
this.sniffIntervalMillis = sniffIntervalMillis;
return this;
}
/**
* Sets the delay of a sniff execution scheduled after a failure (in milliseconds)
*/
public SnifferBuilder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) {
if (sniffAfterFailureDelayMillis <= 0) {
throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0");
}
this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis;
return this;
}
/**
* Sets the {@link HostsSniffer} to be used to read hosts. A default instance of {@link ElasticsearchHostsSniffer}
* is created when not provided. This method can be used to change the configuration of the {@link ElasticsearchHostsSniffer},
* or to provide a different implementation (e.g. in case hosts need to be taken from a different source).
*/
public SnifferBuilder setHostsSniffer(HostsSniffer hostsSniffer) {
Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null");
this.hostsSniffer = hostsSniffer;
return this;
}
/**
* Creates the {@link Sniffer} based on the provided configuration.
*/
public Sniffer build() {
if (hostsSniffer == null) {
this.hostsSniffer = new ElasticsearchHostsSniffer(restClient);
}
return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis);
}
}
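
Putting the builder together with a custom HostsSniffer (the StaticHostsSniffer sketch from earlier); the interval override is an arbitrary example and can be omitted to keep the five-minute default:

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.Sniffer;

public class SnifferBuilderExample {
    public static void main(String[] args) throws Exception {
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
             Sniffer sniffer = Sniffer.builder(restClient)
                     .setSniffIntervalMillis(60000) // refresh every minute instead of the default
                     .setHostsSniffer(new StaticHostsSniffer())
                     .build()) {
            // the background task now periodically sets the sniffed hosts on restClient
        }
    }
}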

View File

@ -43,7 +43,6 @@ import java.io.OutputStream;
import java.io.StringWriter; import java.io.StringWriter;
import java.net.InetAddress; import java.net.InetAddress;
import java.net.InetSocketAddress; import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collections; import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
@ -61,17 +60,17 @@ import static org.junit.Assert.fail;
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement @IgnoreJRERequirement
public class HostsSnifferTests extends RestClientTestCase { public class ElasticsearchHostsSnifferTests extends RestClientTestCase {
private int sniffRequestTimeout; private int sniffRequestTimeout;
private HostsSniffer.Scheme scheme; private ElasticsearchHostsSniffer.Scheme scheme;
private SniffResponse sniffResponse; private SniffResponse sniffResponse;
private HttpServer httpServer; private HttpServer httpServer;
@Before @Before
public void startHttpServer() throws IOException { public void startHttpServer() throws IOException {
this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000); this.sniffRequestTimeout = RandomInts.randomIntBetween(getRandom(), 1000, 10000);
this.scheme = RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values()); this.scheme = RandomPicks.randomFrom(getRandom(), ElasticsearchHostsSniffer.Scheme.values());
if (rarely()) { if (rarely()) {
this.sniffResponse = SniffResponse.buildFailure(); this.sniffResponse = SniffResponse.buildFailure();
} else { } else {
@ -86,14 +85,35 @@ public class HostsSnifferTests extends RestClientTestCase {
httpServer.stop(0); httpServer.stop(0);
} }
public void testSniffNodes() throws IOException, URISyntaxException { public void testConstructorValidation() throws IOException {
try {
new ElasticsearchHostsSniffer(null, 1, ElasticsearchHostsSniffer.Scheme.HTTP);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("restClient cannot be null", e.getMessage());
}
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) { try (RestClient restClient = RestClient.builder(httpHost).build()) {
HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout); try {
if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) { new ElasticsearchHostsSniffer(restClient, 1, null);
builder.setScheme(scheme); fail("should have failed");
} catch (NullPointerException e) {
assertEquals(e.getMessage(), "scheme cannot be null");
} }
HostsSniffer sniffer = builder.build(); try {
new ElasticsearchHostsSniffer(restClient, RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0),
ElasticsearchHostsSniffer.Scheme.HTTP);
fail("should have failed");
} catch (IllegalArgumentException e) {
assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0");
}
}
}
public void testSniffNodes() throws IOException {
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) {
ElasticsearchHostsSniffer sniffer = new ElasticsearchHostsSniffer(restClient, sniffRequestTimeout, scheme);
try { try {
List<HttpHost> sniffedHosts = sniffer.sniffHosts(); List<HttpHost> sniffedHosts = sniffer.sniffHosts();
if (sniffResponse.isFailure) { if (sniffResponse.isFailure) {
@ -154,7 +174,7 @@ public class HostsSnifferTests extends RestClientTestCase {
} }
} }
private static SniffResponse buildSniffResponse(HostsSniffer.Scheme scheme) throws IOException { private static SniffResponse buildSniffResponse(ElasticsearchHostsSniffer.Scheme scheme) throws IOException {
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5); int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
List<HttpHost> hosts = new ArrayList<>(numNodes); List<HttpHost> hosts = new ArrayList<>(numNodes);
JsonFactory jsonFactory = new JsonFactory(); JsonFactory jsonFactory = new JsonFactory();

View File

@ -1,73 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client.sniff;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.fail;
public class HostsSnifferBuilderTests extends RestClientTestCase {
public void testBuild() throws Exception {
try {
HostsSniffer.builder(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals(e.getMessage(), "restClient cannot be null");
}
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
HttpHost[] hosts = new HttpHost[numNodes];
for (int i = 0; i < numNodes; i++) {
hosts[i] = new HttpHost("localhost", 9200 + i);
}
try (RestClient client = RestClient.builder(hosts).build()) {
try {
HostsSniffer.builder(client).setScheme(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals(e.getMessage(), "scheme cannot be null");
}
try {
HostsSniffer.builder(client).setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0");
}
HostsSniffer.Builder builder = HostsSniffer.builder(client);
if (getRandom().nextBoolean()) {
builder.setScheme(RandomPicks.randomFrom(getRandom(), HostsSniffer.Scheme.values()));
}
if (getRandom().nextBoolean()) {
builder.setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
}
assertNotNull(builder.build());
}
}
}

View File

@ -22,18 +22,15 @@ package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost; import org.apache.http.HttpHost;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.Collections;
import java.util.List; import java.util.List;
class MockHostsSniffer extends HostsSniffer { /**
MockHostsSniffer() { * Mock implementation of {@link HostsSniffer}. Useful to prevent any connection attempt while testing builders etc.
super(null, -1, null); */
} class MockHostsSniffer implements HostsSniffer {
@Override @Override
public List<HttpHost> sniffHosts() throws IOException { public List<HttpHost> sniffHosts() throws IOException {
List<HttpHost> hosts = new ArrayList<>(); return Collections.singletonList(new HttpHost("localhost", 9200));
hosts.add(new HttpHost("localhost", 9200));
return hosts;
} }
} }

View File

@ -45,16 +45,17 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
assertEquals("sniffer must not be null", e.getMessage()); assertEquals("sniffer must not be null", e.getMessage());
} }
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build(); try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) { try (Sniffer sniffer = Sniffer.builder(restClient).setHostsSniffer(new MockHostsSniffer()).build()) {
listener.setSniffer(sniffer);
try {
listener.setSniffer(sniffer); listener.setSniffer(sniffer);
fail("should have failed"); try {
} catch(IllegalStateException e) { listener.setSniffer(sniffer);
assertEquals("sniffer can only be set once", e.getMessage()); fail("should have failed");
} catch(IllegalStateException e) {
assertEquals("sniffer can only be set once", e.getMessage());
}
listener.onFailure(new HttpHost("localhost", 9200));
} }
listener.onFailure(new HttpHost("localhost", 9200));
} }
} }
} }

View File

@ -37,50 +37,52 @@ public class SnifferBuilderTests extends RestClientTestCase {
hosts[i] = new HttpHost("localhost", 9200 + i); hosts[i] = new HttpHost("localhost", 9200 + i);
} }
HostsSniffer hostsSniffer = new MockHostsSniffer();
try (RestClient client = RestClient.builder(hosts).build()) { try (RestClient client = RestClient.builder(hosts).build()) {
try { try {
Sniffer.builder(null, hostsSniffer).build(); Sniffer.builder(null).build();
fail("should have failed"); fail("should have failed");
} catch(NullPointerException e) { } catch(NullPointerException e) {
assertEquals("restClient cannot be null", e.getMessage()); assertEquals("restClient cannot be null", e.getMessage());
} }
try { try {
Sniffer.builder(client, null).build(); Sniffer.builder(client).setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("hostsSniffer cannot be null", e.getMessage());
}
try {
Sniffer.builder(client, hostsSniffer)
.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed"); fail("should have failed");
} catch(IllegalArgumentException e) { } catch(IllegalArgumentException e) {
assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage());
} }
try { try {
Sniffer.builder(client, hostsSniffer) Sniffer.builder(client).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
fail("should have failed"); fail("should have failed");
} catch(IllegalArgumentException e) { } catch(IllegalArgumentException e) {
assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage());
} }
try (Sniffer sniffer = Sniffer.builder(client, hostsSniffer).build()) {
try {
Sniffer.builder(client).setHostsSniffer(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("hostsSniffer cannot be null", e.getMessage());
}
try (Sniffer sniffer = Sniffer.builder(client).build()) {
assertNotNull(sniffer); assertNotNull(sniffer);
} }
Sniffer.Builder builder = Sniffer.builder(client, hostsSniffer); SnifferBuilder builder = Sniffer.builder(client);
if (getRandom().nextBoolean()) { if (getRandom().nextBoolean()) {
builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); builder.setSniffIntervalMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
} }
if (getRandom().nextBoolean()) { if (getRandom().nextBoolean()) {
builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE)); builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(getRandom(), 1, Integer.MAX_VALUE));
} }
if (getRandom().nextBoolean()) {
builder.setHostsSniffer(new MockHostsSniffer());
}
try (Sniffer sniffer = builder.build()) { try (Sniffer sniffer = builder.build()) {
assertNotNull(sniffer); assertNotNull(sniffer);
} }

View File

@ -26,6 +26,7 @@ group = 'org.elasticsearch.client'
dependencies { dependencies {
compile "org.elasticsearch:elasticsearch:${version}" compile "org.elasticsearch:elasticsearch:${version}"
compile project(path: ':modules:transport-netty3', configuration: 'runtime') compile project(path: ':modules:transport-netty3', configuration: 'runtime')
compile project(path: ':modules:transport-netty4', configuration: 'runtime')
compile project(path: ':modules:reindex', configuration: 'runtime') compile project(path: ':modules:reindex', configuration: 'runtime')
compile project(path: ':modules:lang-mustache', configuration: 'runtime') compile project(path: ':modules:lang-mustache', configuration: 'runtime')
compile project(path: ':modules:percolator', configuration: 'runtime') compile project(path: ':modules:percolator', configuration: 'runtime')

View File

@ -16,30 +16,47 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.transport.client; package org.elasticsearch.transport.client;
import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin; import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin; import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin; import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin; import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.Netty4Plugin;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.List;
/** /**
* A builder to create an instance of {@link TransportClient} * A builder to create an instance of {@link TransportClient}
* This class pre-installs the {@link Netty3Plugin}, {@link ReindexPlugin}, {@link PercolatorPlugin}, and {@link MustachePlugin} * This class pre-installs the
* {@link Netty3Plugin},
* {@link Netty4Plugin},
* {@link ReindexPlugin},
* {@link PercolatorPlugin},
* and {@link MustachePlugin}
* for the client. These plugins are all elasticsearch core modules required. * for the client. These plugins are all elasticsearch core modules required.
*/ */
@SuppressWarnings({"unchecked","varargs"}) @SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient { public class PreBuiltTransportClient extends TransportClient {
private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS = Collections.unmodifiableList(Arrays.asList(
TransportPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class)); private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS =
Collections.unmodifiableList(
Arrays.asList(
Netty3Plugin.class,
Netty4Plugin.class,
TransportPlugin.class,
ReindexPlugin.class,
PercolatorPlugin.class,
MustachePlugin.class));
@SafeVarargs @SafeVarargs
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) { public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
@ -50,14 +67,25 @@ public class PreBuiltTransportClient extends TransportClient {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS)); super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
} }
/** public static final class TransportPlugin extends Plugin {
* The default transport implementation for the transport client.
*/ private static final Setting<Boolean> ASSERT_NETTY_BUGLEVEL =
public static final class TransportPlugin extends Netty3Plugin { Setting.boolSetting("netty.assert.buglevel", true, Setting.Property.NodeScope);
// disable assertions for permissions since we might not have the permissions here
// compared to if we are loaded as a real module to the es server @Override
public TransportPlugin(Settings settings) { public List<Setting<?>> getSettings() {
super(Settings.builder().put("netty.assert.buglevel", false).put(settings).build()); return Collections.singletonList(ASSERT_NETTY_BUGLEVEL);
} }
@Override
public Settings additionalSettings() {
return Settings.builder()
.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME)
.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME)
.put("netty.assert.buglevel", true)
.build();
}
} }
} }
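
For context, a hedged sketch of how the pre-built client above is typically constructed; the transport address is illustrative:

import java.net.InetAddress;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class TransportClientExample {
    public static void main(String[] args) throws Exception {
        TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
                .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
        // the netty3/netty4 transports plus the reindex, percolator and mustache plugins are pre-installed
        client.close();
    }
}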

View File

@ -16,6 +16,7 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.transport.client; package org.elasticsearch.transport.client;
import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.RandomizedTest;
@ -57,4 +58,5 @@ public class PreBuiltTransportClientTests extends RandomizedTest {
} }
} }
} }
} }

View File

@ -22,8 +22,10 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.WriteResponse;
import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.StatusToXContent; import org.elasticsearch.common.xcontent.StatusToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.IndexSettings;
@ -31,29 +33,83 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import java.io.IOException; import java.io.IOException;
import java.util.Locale;
/** /**
* A base class for the response of a write operation that involves a single doc * A base class for the response of a write operation that involves a single doc
*/ */
public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent { public abstract class DocWriteResponse extends ReplicationResponse implements WriteResponse, StatusToXContent {
public enum Operation implements Writeable {
CREATE(0),
INDEX(1),
DELETE(2),
NOOP(3);
private final byte op;
private final String lowercase;
Operation(int op) {
this.op = (byte) op;
this.lowercase = this.toString().toLowerCase(Locale.ENGLISH);
}
public byte getOp() {
return op;
}
public String getLowercase() {
return lowercase;
}
public static Operation readFrom(StreamInput in) throws IOException {
byte opcode = in.readByte();
switch (opcode) {
case 0:
return CREATE;
case 1:
return INDEX;
case 2:
return DELETE;
case 3:
return NOOP;
default:
throw new IllegalArgumentException("Unknown operation code: " + opcode);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(op);
}
}
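// The enum above serializes as a single opcode byte. A plain-JDK analogue of the
// round-trip (StreamInput/StreamOutput are elasticsearch internals, so this sketch
// substitutes DataOutputStream/DataInputStream purely for illustration):
//
//     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
//     new DataOutputStream(bytes).writeByte(Operation.DELETE.getOp()); // writes 2
//     DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
//     byte opcode = in.readByte(); // 2, which readFrom's switch maps back to DELETE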
private ShardId shardId; private ShardId shardId;
private String id; private String id;
private String type; private String type;
private long version; private long version;
private boolean forcedRefresh; private boolean forcedRefresh;
protected Operation operation;
public DocWriteResponse(ShardId shardId, String type, String id, long version) { public DocWriteResponse(ShardId shardId, String type, String id, long version, Operation operation) {
this.shardId = shardId; this.shardId = shardId;
this.type = type; this.type = type;
this.id = id; this.id = id;
this.version = version; this.version = version;
this.operation = operation;
} }
// needed for deserialization // needed for deserialization
protected DocWriteResponse() { protected DocWriteResponse() {
} }
/**
* The change that occurred to the document.
*/
public Operation getOperation() {
return operation;
}
/** /**
* The index the document was changed in. * The index the document was changed in.
*/ */
@ -109,6 +165,30 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
return getShardInfo().status(); return getShardInfo().status();
} }
/**
* Gets the location of the written document as a string suitable for a {@code Location} header.
* @param routing any routing used in the request. If null the location doesn't include routing information.
*/
public String getLocation(@Nullable String routing) {
// Absolute path for the location of the document. This should be allowed as of HTTP/1.1:
// https://tools.ietf.org/html/rfc7231#section-7.1.2
String index = getIndex();
String type = getType();
String id = getId();
String routingStart = "?routing=";
int bufferSize = 3 + index.length() + type.length() + id.length();
if (routing != null) {
bufferSize += routingStart.length() + routing.length();
}
StringBuilder location = new StringBuilder(bufferSize);
location.append('/').append(index);
location.append('/').append(type);
location.append('/').append(id);
if (routing != null) {
location.append(routingStart).append(routing);
}
return location.toString();
}
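// A standalone mirror of the method above, runnable outside elasticsearch, with
// made-up index/type/id values to show the resulting Location strings:
//
//     static String location(String index, String type, String id, String routing) {
//         StringBuilder sb = new StringBuilder("/").append(index)
//                 .append('/').append(type)
//                 .append('/').append(id);
//         if (routing != null) {
//             sb.append("?routing=").append(routing);
//         }
//         return sb.toString();
//     }
//
//     // location("twitter", "tweet", "1", null)     -> "/twitter/tweet/1"
//     // location("twitter", "tweet", "1", "kimchy") -> "/twitter/tweet/1?routing=kimchy"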
@Override @Override
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
@ -118,6 +198,7 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
id = in.readString(); id = in.readString();
version = in.readZLong(); version = in.readZLong();
forcedRefresh = in.readBoolean(); forcedRefresh = in.readBoolean();
operation = Operation.readFrom(in);
} }
@Override @Override
@ -128,22 +209,17 @@ public abstract class DocWriteResponse extends ReplicationResponse implements Wr
out.writeString(id); out.writeString(id);
out.writeZLong(version); out.writeZLong(version);
out.writeBoolean(forcedRefresh); out.writeBoolean(forcedRefresh);
} operation.writeTo(out);
static final class Fields {
static final String _INDEX = "_index";
static final String _TYPE = "_type";
static final String _ID = "_id";
static final String _VERSION = "_version";
} }
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
ReplicationResponse.ShardInfo shardInfo = getShardInfo(); ReplicationResponse.ShardInfo shardInfo = getShardInfo();
builder.field(Fields._INDEX, shardId.getIndexName()) builder.field("_index", shardId.getIndexName())
.field(Fields._TYPE, type) .field("_type", type)
.field(Fields._ID, id) .field("_id", id)
.field(Fields._VERSION, version) .field("_version", version)
.field("_operation", getOperation().getLowercase())
.field("forced_refresh", forcedRefresh); .field("forced_refresh", forcedRefresh);
shardInfo.toXContent(builder, params); shardInfo.toXContent(builder, params);
return builder; return builder;

View File

@ -248,7 +248,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
BytesReference indexSourceAsBytes = indexRequest.source(); BytesReference indexSourceAsBytes = indexRequest.source();
// add the response // add the response
IndexResponse indexResponse = result.getResponse(); IndexResponse indexResponse = result.getResponse();
UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.isCreated()); UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getOperation());
if (updateRequest.fields() != null && updateRequest.fields().length > 0) { if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true); Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes)); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
@ -261,7 +261,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
WriteResult<DeleteResponse> writeResult = updateResult.writeResult; WriteResult<DeleteResponse> writeResult = updateResult.writeResult;
DeleteResponse response = writeResult.getResponse(); DeleteResponse response = writeResult.getResponse();
DeleteRequest deleteRequest = updateResult.request(); DeleteRequest deleteRequest = updateResult.request();
updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false); updateResponse = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());
updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null)); updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), response.getVersion(), updateResult.result.updatedSourceAsMap(), updateResult.result.updateSourceContentType(), null));
// Replace the update request to the translated delete request to execute on the replica. // Replace the update request to the translated delete request to execute on the replica.
item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest); item = request.items()[requestIndex] = new BulkItemRequest(request.items()[requestIndex].id(), deleteRequest);

View File

@ -20,8 +20,6 @@
package org.elasticsearch.action.delete; package org.elasticsearch.action.delete;
import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
@ -36,52 +34,29 @@ import java.io.IOException;
*/ */
public class DeleteResponse extends DocWriteResponse { public class DeleteResponse extends DocWriteResponse {
private boolean found;
public DeleteResponse() { public DeleteResponse() {
} }
public DeleteResponse(ShardId shardId, String type, String id, long version, boolean found) { public DeleteResponse(ShardId shardId, String type, String id, long version, boolean found) {
super(shardId, type, id, version); super(shardId, type, id, version, found ? Operation.DELETE : Operation.NOOP);
this.found = found;
} }
/** /**
* Returns <tt>true</tt> if a doc was found to delete. * Returns <tt>true</tt> if a doc was found to delete.
*/ */
public boolean isFound() { public boolean isFound() {
return found; return operation == Operation.DELETE;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
found = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(found);
} }
@Override @Override
public RestStatus status() { public RestStatus status() {
if (found == false) { return isFound() ? super.status() : RestStatus.NOT_FOUND;
return RestStatus.NOT_FOUND;
}
return super.status();
}
static final class Fields {
static final String FOUND = "found";
} }
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.FOUND, isFound()); builder.field("found", isFound());
super.toXContent(builder, params); super.toXContent(builder, params);
return builder; return builder;
} }
@ -94,7 +69,7 @@ public class DeleteResponse extends DocWriteResponse {
builder.append(",type=").append(getType()); builder.append(",type=").append(getType());
builder.append(",id=").append(getId()); builder.append(",id=").append(getId());
builder.append(",version=").append(getVersion()); builder.append(",version=").append(getVersion());
builder.append(",found=").append(found); builder.append(",operation=").append(getOperation().getLowercase());
builder.append(",shards=").append(getShardInfo()); builder.append(",shards=").append(getShardInfo());
return builder.append("]").toString(); return builder.append("]").toString();
} }

View File

@ -36,42 +36,24 @@ import java.io.IOException;
*/ */
public class IndexResponse extends DocWriteResponse { public class IndexResponse extends DocWriteResponse {
private boolean created;
public IndexResponse() { public IndexResponse() {
} }
public IndexResponse(ShardId shardId, String type, String id, long version, boolean created) { public IndexResponse(ShardId shardId, String type, String id, long version, boolean created) {
super(shardId, type, id, version); super(shardId, type, id, version, created ? Operation.CREATE : Operation.INDEX);
this.created = created;
} }
/** /**
* Returns true if the document was created, false if updated. * Returns true if the document was created, false if updated.
*/ */
public boolean isCreated() { public boolean isCreated() {
return this.created; return this.operation == Operation.CREATE;
} }
@Override @Override
public RestStatus status() { public RestStatus status() {
if (created) { return isCreated() ? RestStatus.CREATED : super.status();
return RestStatus.CREATED;
}
return super.status();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
created = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(created);
} }
@Override @Override
@ -82,19 +64,15 @@ public class IndexResponse extends DocWriteResponse {
builder.append(",type=").append(getType()); builder.append(",type=").append(getType());
builder.append(",id=").append(getId()); builder.append(",id=").append(getId());
builder.append(",version=").append(getVersion()); builder.append(",version=").append(getVersion());
builder.append(",created=").append(created); builder.append(",operation=").append(getOperation().getLowercase());
builder.append(",shards=").append(getShardInfo()); builder.append(",shards=").append(getShardInfo());
return builder.append("]").toString(); return builder.append("]").toString();
} }
static final class Fields {
static final String CREATED = "created";
}
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
super.toXContent(builder, params); super.toXContent(builder, params);
builder.field(Fields.CREATED, isCreated()); builder.field("created", isCreated());
return builder; return builder;
} }
} }

View File

@ -21,26 +21,25 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException; import java.io.IOException;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
public class GetPipelineRequest extends MasterNodeReadRequest<GetPipelineRequest> { public class GetPipelineRequest extends MasterNodeReadRequest<GetPipelineRequest> {
private String[] ids; private String[] ids;
public GetPipelineRequest(String... ids) { public GetPipelineRequest(String... ids) {
if (ids == null || ids.length == 0) { if (ids == null) {
throw new IllegalArgumentException("No ids specified"); throw new IllegalArgumentException("ids cannot be null");
} }
this.ids = ids; this.ids = ids;
} }
GetPipelineRequest() { GetPipelineRequest() {
this.ids = Strings.EMPTY_ARRAY;
} }
public String[] getIds() { public String[] getIds() {

View File

@ -41,15 +41,14 @@ final class WriteableIngestDocument implements Writeable, ToXContent {
WriteableIngestDocument(StreamInput in) throws IOException { WriteableIngestDocument(StreamInput in) throws IOException {
Map<String, Object> sourceAndMetadata = in.readMap(); Map<String, Object> sourceAndMetadata = in.readMap();
@SuppressWarnings("unchecked") Map<String, Object> ingestMetadata = in.readMap();
Map<String, String> ingestMetadata = (Map<String, String>) in.readGenericValue();
this.ingestDocument = new IngestDocument(sourceAndMetadata, ingestMetadata); this.ingestDocument = new IngestDocument(sourceAndMetadata, ingestMetadata);
} }
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
out.writeMap(ingestDocument.getSourceAndMetadata()); out.writeMap(ingestDocument.getSourceAndMetadata());
out.writeGenericValue(ingestDocument.getIngestMetadata()); out.writeMap(ingestDocument.getIngestMetadata());
} }
IngestDocument getIngestDocument() { IngestDocument getIngestDocument() {
@ -66,11 +65,7 @@ final class WriteableIngestDocument implements Writeable, ToXContent {
} }
} }
builder.field("_source", ingestDocument.getSourceAndMetadata()); builder.field("_source", ingestDocument.getSourceAndMetadata());
builder.startObject("_ingest"); builder.field("_ingest", ingestDocument.getIngestMetadata());
for (Map.Entry<String, String> ingestMetadata : ingestDocument.getIngestMetadata().entrySet()) {
builder.field(ingestMetadata.getKey(), ingestMetadata.getValue());
}
builder.endObject();
builder.endObject(); builder.endObject();
return builder; return builder;
} }

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.update;
import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@ -185,7 +186,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() { indexAction.execute(upsertRequest, new ActionListener<IndexResponse>() {
@Override @Override
 public void onResponse(IndexResponse response) {
-    UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
+    UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());
     if (request.fields() != null && request.fields().length > 0) {
         Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
         update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
@@ -223,7 +224,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
 indexAction.execute(indexRequest, new ActionListener<IndexResponse>() {
     @Override
     public void onResponse(IndexResponse response) {
-        UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.isCreated());
+        UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());
         update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes));
         update.setForcedRefresh(response.forcedRefresh());
         listener.onResponse(update);
@@ -252,7 +253,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationAction
 deleteAction.execute(deleteRequest, new ActionListener<DeleteResponse>() {
     @Override
     public void onResponse(DeleteResponse response) {
-        UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), false);
+        UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getOperation());
         update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null));
         update.setForcedRefresh(response.forcedRefresh());
         listener.onResponse(update);

View File

@@ -116,7 +116,7 @@ public class UpdateHelper extends AbstractComponent {
                 request.script.getScript());
         }
         UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(),
-            getResult.getVersion(), false);
+            getResult.getVersion(), UpdateResponse.convert(Operation.NONE));
         update.setGetResult(getResult);
         return new Result(update, Operation.NONE, upsertDoc, XContentType.JSON);
     }
@@ -234,12 +234,12 @@ public class UpdateHelper extends AbstractComponent {
             .setRefreshPolicy(request.getRefreshPolicy());
         return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
     } else if ("none".equals(operation)) {
-        UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+        UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), UpdateResponse.convert(Operation.NONE));
         update.setGetResult(extractGetResult(request, request.index(), getResult.getVersion(), updatedSourceAsMap, updateSourceContentType, getResult.internalSourceRef()));
         return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
     } else {
         logger.warn("Used update operation [{}] for script [{}], doing nothing...", operation, request.script.getScript());
-        UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), false);
+        UpdateResponse update = new UpdateResponse(shardId, getResult.getType(), getResult.getId(), getResult.getVersion(), UpdateResponse.convert(Operation.NONE));
         return new Result(update, Operation.NONE, updatedSourceAsMap, updateSourceContentType);
     }
 }

View File

@@ -29,11 +29,8 @@ import org.elasticsearch.rest.RestStatus;
 import java.io.IOException;
 
-/**
- */
 public class UpdateResponse extends DocWriteResponse {
 
-    private boolean created;
     private GetResult getResult;
 
     public UpdateResponse() {
@@ -43,14 +40,28 @@ public class UpdateResponse extends DocWriteResponse {
      * Constructor to be used when a update didn't translate in a write.
      * For example: update script with operation set to none
      */
-    public UpdateResponse(ShardId shardId, String type, String id, long version, boolean created) {
-        this(new ShardInfo(0, 0), shardId, type, id, version, created);
+    public UpdateResponse(ShardId shardId, String type, String id, long version, Operation operation) {
+        this(new ShardInfo(0, 0), shardId, type, id, version, operation);
     }
 
-    public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String type, String id, long version, boolean created) {
-        super(shardId, type, id, version);
+    public UpdateResponse(ShardInfo shardInfo, ShardId shardId, String type, String id,
+                          long version, Operation operation) {
+        super(shardId, type, id, version, operation);
         setShardInfo(shardInfo);
-        this.created = created;
+    }
+
+    public static Operation convert(UpdateHelper.Operation op) {
+        switch(op) {
+            case UPSERT:
+                return Operation.CREATE;
+            case INDEX:
+                return Operation.INDEX;
+            case DELETE:
+                return Operation.DELETE;
+            case NONE:
+                return Operation.NOOP;
+        }
+        throw new IllegalArgumentException();
     }
 
     public void setGetResult(GetResult getResult) {
@@ -65,22 +76,17 @@ public class UpdateResponse extends DocWriteResponse {
      * Returns true if document was created due to an UPSERT operation
      */
     public boolean isCreated() {
-        return this.created;
+        return this.operation == Operation.CREATE;
     }
 
     @Override
     public RestStatus status() {
-        if (created) {
-            return RestStatus.CREATED;
-        }
-        return super.status();
+        return isCreated() ? RestStatus.CREATED : super.status();
     }
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        created = in.readBoolean();
         if (in.readBoolean()) {
             getResult = GetResult.readGetResult(in);
         }
@@ -89,7 +95,6 @@ public class UpdateResponse extends DocWriteResponse {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeBoolean(created);
         if (getResult == null) {
             out.writeBoolean(false);
         } else {
@@ -122,7 +127,7 @@ public class UpdateResponse extends DocWriteResponse {
         builder.append(",type=").append(getType());
         builder.append(",id=").append(getId());
         builder.append(",version=").append(getVersion());
-        builder.append(",created=").append(created);
+        builder.append(",operation=").append(getOperation().getLowercase());
         builder.append(",shards=").append(getShardInfo());
         return builder.append("]").toString();
     }
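
The net effect of the UpdateResponse changes above: the response no longer stores a separate created flag; everything is derived from the operation enum carried by DocWriteResponse. A minimal, self-contained sketch of that derivation (the enum and class here are illustrative stand-ins, not the real Elasticsearch types):

    // Sketch only: mirrors how UpdateResponse now derives the old boolean
    // from the operation enum instead of storing it as a field.
    enum Operation { CREATE, INDEX, DELETE, NOOP }

    final class ResponseSketch {
        private final Operation operation;

        ResponseSketch(Operation operation) {
            this.operation = operation;
        }

        // replaces the removed "private boolean created" field
        boolean isCreated() {
            return operation == Operation.CREATE;
        }

        // 201 only for upserts that created a document, per the status() hunk
        int httpStatus() {
            return isCreated() ? 201 : 200;
        }
    }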

View File

@@ -142,7 +142,8 @@ final class Bootstrap {
     JvmInfo.jvmInfo();
 }
 
-private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception {
+private void setup(boolean addShutdownHook, Environment environment) throws Exception {
+    Settings settings = environment.settings();
     initializeNatives(
         environment.tmpFile(),
         BootstrapSettings.MEMORY_LOCK_SETTING.get(settings),
@@ -171,7 +172,7 @@ final class Bootstrap {
     // install SM after natives, shutdown hooks, etc.
     Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
 
-    node = new Node(settings) {
+    node = new Node(environment) {
         @Override
         protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
             BootstrapCheck.check(settings, boundTransportAddress);
@@ -179,7 +180,7 @@ final class Bootstrap {
     };
 }
 
-private static Environment initialSettings(boolean foreground, Path pidFile, Map<String, String> esSettings) {
+private static Environment initialEnvironment(boolean foreground, Path pidFile, Map<String, String> esSettings) {
     Terminal terminal = foreground ? Terminal.DEFAULT : null;
     Settings.Builder builder = Settings.builder();
     if (pidFile != null) {
@@ -225,9 +226,8 @@ final class Bootstrap {
     INSTANCE = new Bootstrap();
 
-    Environment environment = initialSettings(foreground, pidFile, esSettings);
-    Settings settings = environment.settings();
-    LogConfigurator.configure(settings, true);
+    Environment environment = initialEnvironment(foreground, pidFile, esSettings);
+    LogConfigurator.configure(environment.settings(), true);
     checkForCustomConfFile();
 
     if (environment.pidFile() != null) {
@@ -250,9 +250,9 @@ final class Bootstrap {
     // initialized as we do not want to grant the runtime permission
     // setDefaultUncaughtExceptionHandler
     Thread.setDefaultUncaughtExceptionHandler(
-        new ElasticsearchUncaughtExceptionHandler(() -> Node.NODE_NAME_SETTING.get(settings)));
+        new ElasticsearchUncaughtExceptionHandler(() -> Node.NODE_NAME_SETTING.get(environment.settings())));
 
-    INSTANCE.setup(true, settings, environment);
+    INSTANCE.setup(true, environment);
 
     INSTANCE.start();
@@ -266,7 +266,7 @@ final class Bootstrap {
     }
     ESLogger logger = Loggers.getLogger(Bootstrap.class);
     if (INSTANCE.node != null) {
-        logger = Loggers.getLogger(Bootstrap.class, INSTANCE.node.settings().get("node.name"));
+        logger = Loggers.getLogger(Bootstrap.class, Node.NODE_NAME_SETTING.get(INSTANCE.node.settings()));
     }
     // HACK, it sucks to do this, but we will run users out of disk space otherwise
     if (e instanceof CreationException) {

View File

@@ -19,15 +19,6 @@
 
 package org.elasticsearch.client.transport;
 
-import java.io.Closeable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.action.Action;
 import org.elasticsearch.action.ActionListener;
@@ -63,6 +54,15 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TcpTransport;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
 /**
  * The transport client allows to create a client that is not part of the cluster, but simply connects to one
  * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
@@ -100,6 +100,9 @@ public abstract class TransportClient extends AbstractClient {
 
     private static ClientTemplate buildTemplate(Settings providedSettings, Settings defaultSettings,
                                                 Collection<Class<? extends Plugin>> plugins) {
+        if (Node.NODE_NAME_SETTING.exists(providedSettings) == false) {
+            providedSettings = Settings.builder().put(providedSettings).put(Node.NODE_NAME_SETTING.getKey(), "_client_").build();
+        }
         final PluginsService pluginsService = newPluginService(providedSettings, plugins);
         final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
         final List<Closeable> resourcesToClose = new ArrayList<>();
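
The practical effect of the new buildTemplate guard: a transport client constructed without an explicit node.name now reports "_client_". A short sketch that replays the defaulting logic from the hunk above (assumes the Elasticsearch core jar on the classpath):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.node.Node;

    public class ClientNameDemo {
        public static void main(String[] args) {
            // caller did not set node.name...
            Settings provided = Settings.builder().put("cluster.name", "my-cluster").build();

            // ...so the client injects the default, exactly as buildTemplate does
            if (Node.NODE_NAME_SETTING.exists(provided) == false) {
                provided = Settings.builder()
                        .put(provided)
                        .put(Node.NODE_NAME_SETTING.getKey(), "_client_")
                        .build();
            }
            System.out.println(Node.NODE_NAME_SETTING.get(provided)); // _client_
        }
    }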

View File

@@ -202,8 +202,10 @@ public class MetaDataCreateIndexService extends AbstractComponent {
 if (response.isAcknowledged()) {
     activeShardsObserver.waitForActiveShards(request.index(), request.waitForActiveShards(), request.ackTimeout(),
         shardsAcked -> {
-            logger.debug("[{}] index created, but the operation timed out while waiting for " +
-                "enough shards to be started.", request.index());
+            if (shardsAcked == false) {
+                logger.debug("[{}] index created, but the operation timed out while waiting for " +
+                    "enough shards to be started.", request.index());
+            }
             listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcked));
         }, listener::onFailure);
 } else {

View File

@@ -39,7 +39,6 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Predicate;
-import java.util.function.Supplier;
 
 import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;
 
@@ -191,7 +190,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
     }
 
     /** Creates a DiscoveryNode representing the local node. */
-    public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeIdSupplier) {
+    public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeId) {
         Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap());
         Set<DiscoveryNode.Role> roles = new HashSet<>();
         if (Node.NODE_INGEST_SETTING.get(settings)) {
@@ -204,8 +203,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
             roles.add(DiscoveryNode.Role.DATA);
         }
 
-        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeIdSupplier, publishAddress,
-            attributes, roles, Version.CURRENT);
+        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress,attributes, roles, Version.CURRENT);
     }
 
     /**

View File

@@ -33,6 +33,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -509,7 +510,19 @@ public class Strings {
         else return s.split(",");
     }
 
+    /**
+     * A convenience method for splitting a delimited string into
+     * a set and trimming leading and trailing whitespace from all
+     * split strings.
+     *
+     * @param s the string to split
+     * @param c the delimiter to split on
+     * @return the set of split strings
+     */
     public static Set<String> splitStringToSet(final String s, final char c) {
+        if (s == null || s.isEmpty()) {
+            return Collections.emptySet();
+        }
         final char[] chars = s.toCharArray();
         int count = 1;
         for (final char x : chars) {
@@ -521,16 +534,25 @@ public class Strings {
         final int len = chars.length;
         int start = 0; // starting index in chars of the current substring.
         int pos = 0; // current index in chars.
+        int end = 0; // the position of the end of the current token
         for (; pos < len; pos++) {
             if (chars[pos] == c) {
-                int size = pos - start;
+                int size = end - start;
                 if (size > 0) { // only add non empty strings
                     result.add(new String(chars, start, size));
                 }
                 start = pos + 1;
+                end = start;
+            } else if (Character.isWhitespace(chars[pos])) {
+                if (start == pos) {
+                    // skip over preceding whitespace
+                    start++;
+                }
+            } else {
+                end = pos + 1;
             }
         }
-        int size = pos - start;
+        int size = end - start;
         if (size > 0) {
             result.add(new String(chars, start, size));
         }
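
To make the new splitter semantics concrete: leading and trailing whitespace around each token is skipped, empty tokens are dropped, and a null or empty input now short-circuits to an empty set. A small sketch (assumes the patched org.elasticsearch.common.Strings is on the classpath; set iteration order is not guaranteed):

    import java.util.Set;
    import org.elasticsearch.common.Strings;

    public class SplitDemo {
        public static void main(String[] args) {
            // whitespace around tokens is trimmed; empty tokens are dropped
            Set<String> tokens = Strings.splitStringToSet(" a, b ,c , ", ',');
            System.out.println(tokens); // e.g. [a, b, c]

            // the new guard returns an empty set for null/empty input
            // (previously a null input would hit s.toCharArray())
            System.out.println(Strings.splitStringToSet("", ','));   // []
            System.out.println(Strings.splitStringToSet(null, ',')); // []
        }
    }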

View File

@@ -21,7 +21,6 @@ package org.elasticsearch.common.bytes;
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefIterator;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.ByteArray;
 
@@ -36,24 +35,24 @@ public class PagedBytesReference extends BytesReference {
     private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
 
     private final BigArrays bigarrays;
-    protected final ByteArray bytearray;
+    protected final ByteArray byteArray;
     private final int offset;
     private final int length;
 
-    public PagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
-        this(bigarrays, bytearray, 0, length);
+    public PagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length) {
+        this(bigarrays, byteArray, 0, length);
     }
 
-    public PagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int from, int length) {
+    public PagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int from, int length) {
         this.bigarrays = bigarrays;
-        this.bytearray = bytearray;
+        this.byteArray = byteArray;
         this.offset = from;
         this.length = length;
     }
 
     @Override
     public byte get(int index) {
-        return bytearray.get(offset + index);
+        return byteArray.get(offset + index);
     }
 
     @Override
@@ -66,14 +65,14 @@ public class PagedBytesReference extends BytesReference {
         if (from < 0 || (from + length) > length()) {
             throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]");
         }
-        return new PagedBytesReference(bigarrays, bytearray, offset + from, length);
+        return new PagedBytesReference(bigarrays, byteArray, offset + from, length);
     }
 
     @Override
     public BytesRef toBytesRef() {
         BytesRef bref = new BytesRef();
         // if length <= pagesize this will dereference the page, or materialize the byte[]
-        bytearray.get(offset, length, bref);
+        byteArray.get(offset, length, bref);
         return bref;
     }
 
@@ -95,7 +94,7 @@ public class PagedBytesReference extends BytesReference {
         @Override
         public BytesRef next() throws IOException {
             if (nextFragmentSize != 0) {
-                final boolean materialized = bytearray.get(offset + position, nextFragmentSize, slice);
+                final boolean materialized = byteArray.get(offset + position, nextFragmentSize, slice);
                 assert materialized == false : "iteration should be page aligned but array got materialized";
                 position += nextFragmentSize;
                 final int remaining = length - position;
@@ -111,6 +110,6 @@ public class PagedBytesReference extends BytesReference {
     @Override
     public long ramBytesUsed() {
-        return bytearray.ramBytesUsed();
+        return byteArray.ramBytesUsed();
     }
 }

View File

@@ -30,12 +30,13 @@ import org.elasticsearch.common.util.ByteArray;
  */
 public final class ReleasablePagedBytesReference extends PagedBytesReference implements Releasable {
 
-    public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
-        super(bigarrays, bytearray, length);
+    public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray byteArray, int length) {
+        super(bigarrays, byteArray, length);
     }
 
     @Override
     public void close() {
-        Releasables.close(bytearray);
+        Releasables.close(byteArray);
     }
 }

View File

@@ -24,6 +24,7 @@ import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
 
 /**
  *
@@ -50,7 +51,7 @@ public abstract class AbstractComponent {
      * Returns the nodes name from the settings or the empty string if not set.
      */
     public final String nodeName() {
-        return settings.get("node.name", "");
+        return Node.NODE_NAME_SETTING.get(settings);
     }
 
     /**

View File

@@ -28,4 +28,5 @@ public interface ReleasableBytesStream extends BytesStream {
 
     @Override
     ReleasablePagedBytesReference bytes();
 }
+

View File

@@ -33,7 +33,7 @@ import java.io.IOException;
  */
 public class BytesStreamOutput extends StreamOutput implements BytesStream {
 
-    protected final BigArrays bigarrays;
+    protected final BigArrays bigArrays;
 
     protected ByteArray bytes;
     protected int count;
@@ -57,9 +57,9 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
         this(expectedSize, BigArrays.NON_RECYCLING_INSTANCE);
     }
 
-    protected BytesStreamOutput(int expectedSize, BigArrays bigarrays) {
-        this.bigarrays = bigarrays;
-        this.bytes = bigarrays.newByteArray(expectedSize);
+    protected BytesStreamOutput(int expectedSize, BigArrays bigArrays) {
+        this.bigArrays = bigArrays;
+        this.bytes = bigArrays.newByteArray(expectedSize);
     }
 
     @Override
@@ -100,7 +100,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
     public void reset() {
         // shrink list of pages
         if (bytes.size() > BigArrays.PAGE_SIZE_IN_BYTES) {
-            bytes = bigarrays.resize(bytes, BigArrays.PAGE_SIZE_IN_BYTES);
+            bytes = bigArrays.resize(bytes, BigArrays.PAGE_SIZE_IN_BYTES);
         }
 
         // go back to start
@@ -145,7 +145,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
 
     @Override
     public BytesReference bytes() {
-        return new PagedBytesReference(bigarrays, bytes, count);
+        return new PagedBytesReference(bigArrays, bytes, count);
     }
 
     /**
@@ -157,7 +157,7 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream {
     }
 
     private void ensureCapacity(int offset) {
-        bytes = bigarrays.grow(bytes, offset);
+        bytes = bigArrays.grow(bytes, offset);
     }
 }

View File

@@ -36,12 +36,13 @@ public class ReleasableBytesStreamOutput extends BytesStreamOutput implements Re
         super(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays);
     }
 
-    public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigarrays) {
-        super(expectedSize, bigarrays);
+    public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigArrays) {
+        super(expectedSize, bigArrays);
     }
 
     @Override
     public ReleasablePagedBytesReference bytes() {
-        return new ReleasablePagedBytesReference(bigarrays, bytes, count);
+        return new ReleasablePagedBytesReference(bigArrays, bytes, count);
     }
 }
+

View File

@@ -30,4 +30,5 @@ public interface Releasable extends Closeable {
 
     @Override
     void close();
 }
+

View File

@@ -24,6 +24,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.node.Node;
 
 import java.net.InetAddress;
 import java.net.UnknownHostException;
@@ -101,9 +102,8 @@ public class Loggers {
                 prefixesList.add(addr.getHostName());
             }
         }
-        String name = settings.get("node.name");
-        if (name != null) {
-            prefixesList.add(name);
+        if (Node.NODE_NAME_SETTING.exists(settings)) {
+            prefixesList.add(Node.NODE_NAME_SETTING.get(settings));
         }
         if (prefixes != null && prefixes.length > 0) {
             prefixesList.addAll(asList(prefixes));

View File

@@ -55,6 +55,12 @@ public class NetworkExceptionHelper {
             if (e.getMessage().contains("Connection timed out")) {
                 return true;
             }
+            if (e.getMessage().equals("Socket is closed")) {
+                return true;
+            }
+            if (e.getMessage().equals("Socket closed")) {
+                return true;
+            }
         }
         return false;
     }
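
For context, the two added equality checks match the exact messages the JDK uses when a socket has been closed, which differ across call sites. A standalone mirror of the added branches (sketch only, not the actual helper):

    import java.net.SocketException;

    public class CloseMessageDemo {
        // mirror of the message checks added in the hunk above
        static boolean isSocketCloseMessage(Throwable e) {
            String msg = e.getMessage();
            return msg != null && (msg.equals("Socket is closed") || msg.equals("Socket closed"));
        }

        public static void main(String[] args) {
            System.out.println(isSocketCloseMessage(new SocketException("Socket is closed"))); // true
            System.out.println(isSocketCloseMessage(new SocketException("Socket closed")));    // true
            System.out.println(isSocketCloseMessage(new SocketException("Connection reset"))); // false
        }
    }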

View File

@@ -429,6 +429,10 @@ public class BigArrays implements Releasable {
         return this.circuitBreakingInstance;
     }
 
+    public CircuitBreakerService breakerService() {
+        return this.circuitBreakingInstance.breakerService;
+    }
+
     private <T extends AbstractBigArray> T resizeInPlace(T array, long newSize) {
         final long oldMemSize = array.ramBytesUsed();
         array.resize(newSize);

View File

@@ -22,6 +22,7 @@ package org.elasticsearch.common.util.concurrent;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.node.Node;
 
 import java.util.Arrays;
 import java.util.concurrent.BlockingQueue;
@@ -83,11 +84,10 @@ public class EsExecutors {
     }
 
     public static String threadName(Settings settings, String namePrefix) {
-        String nodeName = settings.get("node.name");
-        if (nodeName == null) {
-            return threadName("", namePrefix);
+        if (Node.NODE_NAME_SETTING.exists(settings)) {
+            return threadName(Node.NODE_NAME_SETTING.get(settings), namePrefix);
         } else {
-            return threadName(nodeName, namePrefix);
+            return threadName("", namePrefix);
         }
     }
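
Since threadName now keys off Node.NODE_NAME_SETTING rather than a raw settings.get("node.name"), both branches are easy to exercise. A quick sketch (assumes the Elasticsearch core jar; the exact format of the returned name is an implementation detail):

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.common.util.concurrent.EsExecutors;

    public class ThreadNameDemo {
        public static void main(String[] args) {
            Settings named = Settings.builder().put("node.name", "node-1").build();
            // uses the configured node name...
            System.out.println(EsExecutors.threadName(named, "[search]"));
            // ...and falls back to an empty node name when node.name is unset
            System.out.println(EsExecutors.threadName(Settings.EMPTY, "[search]"));
        }
    }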

View File

@@ -21,7 +21,8 @@ package org.elasticsearch.common.xcontent;
 import org.elasticsearch.rest.RestStatus;
 
 /**
- *
+ * Objects that can both render themselves in as json/yaml/etc and can provide a {@link RestStatus} for their response. Usually should be
+ * implemented by top level responses sent back to users from REST endpoints.
  */
 public interface StatusToXContent extends ToXContent {

View File

@@ -36,6 +36,7 @@ import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import java.util.Objects;
 import java.util.function.Function;
 
 import static org.elasticsearch.common.Strings.cleanPath;
@@ -107,7 +108,6 @@ public class Environment {
     }
 
     public Environment(Settings settings) {
-        this.settings = settings;
         final Path homeFile;
         if (PATH_HOME_SETTING.exists(settings)) {
             homeFile = PathUtils.get(cleanPath(PATH_HOME_SETTING.get(settings)));
@@ -171,6 +171,13 @@ public class Environment {
         binFile = homeFile.resolve("bin");
         libFile = homeFile.resolve("lib");
         modulesFile = homeFile.resolve("modules");
+
+        Settings.Builder finalSettings = Settings.builder().put(settings);
+        finalSettings.put(PATH_HOME_SETTING.getKey(), homeFile);
+        finalSettings.putArray(PATH_DATA_SETTING.getKey(), dataPaths);
+        finalSettings.put(PATH_LOGS_SETTING.getKey(), logsFile);
+        this.settings = finalSettings.build();
     }
 
     /**
@@ -332,4 +339,26 @@ public class Environment {
     public static FileStore getFileStore(Path path) throws IOException {
         return ESFileStore.getMatchingFileStore(path, fileStores);
     }
+
+    /**
+     * asserts that the two environments are equivalent for all things the environment cares about (i.e., all but the setting
+     * object which may contain different setting)
+     */
+    public static void assertEquivalent(Environment actual, Environment expected) {
+        assertEquals(actual.dataWithClusterFiles(), expected.dataWithClusterFiles(), "dataWithClusterFiles");
+        assertEquals(actual.repoFiles(), expected.repoFiles(), "repoFiles");
+        assertEquals(actual.configFile(), expected.configFile(), "configFile");
+        assertEquals(actual.scriptsFile(), expected.scriptsFile(), "scriptsFile");
+        assertEquals(actual.pluginsFile(), expected.pluginsFile(), "pluginsFile");
+        assertEquals(actual.binFile(), expected.binFile(), "binFile");
+        assertEquals(actual.libFile(), expected.libFile(), "libFile");
+        assertEquals(actual.modulesFile(), expected.modulesFile(), "modulesFile");
+        assertEquals(actual.logsFile(), expected.logsFile(), "logsFile");
+        assertEquals(actual.pidFile(), expected.pidFile(), "pidFile");
+        assertEquals(actual.tmpFile(), expected.tmpFile(), "tmpFile");
+    }
+
+    private static void assertEquals(Object actual, Object expected, String name) {
+        assert Objects.deepEquals(actual, expected) : "actual " + name + " [" + actual + "] is different than [ " + expected + "]";
+    }
 }
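
assertEquivalent is assertion-based (it only fires under -ea) and deliberately compares everything except the settings objects, since the constructor now rewrites path.home, path.data, and path.logs into the finalized settings. A hypothetical round-trip check, assuming a minimal path.home-only configuration is enough to build an Environment:

    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.env.Environment;

    public class EnvRoundTripDemo {
        public static void main(String[] args) {
            Settings settings = Settings.builder()
                    .put(Environment.PATH_HOME_SETTING.getKey(), "/tmp/es-home")
                    .build();
            Environment original = new Environment(settings);

            // rebuilding from the finalized settings should be equivalent
            Environment rebuilt = new Environment(original.settings());
            Environment.assertEquivalent(rebuilt, original); // no-op unless run with -ea
        }
    }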

View File

@@ -34,10 +34,10 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.UUIDs;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.logging.DeprecationLogger;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -52,6 +52,7 @@ import org.elasticsearch.index.store.FsDirectoryService;
 import org.elasticsearch.monitor.fs.FsInfo;
 import org.elasticsearch.monitor.fs.FsProbe;
 import org.elasticsearch.monitor.jvm.JvmInfo;
+import org.elasticsearch.node.Node;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -79,7 +80,9 @@ import static java.util.Collections.unmodifiableSet;
 /**
  * A component that holds all data paths for a single node.
  */
-public final class NodeEnvironment extends AbstractComponent implements Closeable {
+public final class NodeEnvironment implements Closeable {
+
+    private final ESLogger logger;
 
     public static class NodePath {
         /* ${data.paths}/nodes/{node.id} */
@@ -139,8 +142,6 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
     private final Path sharedDataPath;
     private final Lock[] locks;
 
-    private final boolean addLockIdToCustomPath;
-
     private final int nodeLockId;
     private final AtomicBoolean closed = new AtomicBoolean(false);
     private final Map<ShardId, InternalShardLock> shardLocks = new HashMap<>();
@@ -177,12 +178,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
     public static final String NODES_FOLDER = "nodes";
     public static final String INDICES_FOLDER = "indices";
     public static final String NODE_LOCK_FILENAME = "node.lock";
-    public static final String UPGRADE_LOCK_FILENAME = "upgrade.lock";
 
     public NodeEnvironment(Settings settings, Environment environment) throws IOException {
-        super(settings);
-        this.addLockIdToCustomPath = ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(settings);
-
         if (!DiscoveryNode.nodeRequiresLocalStorage(settings)) {
             nodePaths = null;
@@ -190,12 +187,16 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
             locks = null;
             nodeLockId = -1;
             nodeMetaData = new NodeMetaData(generateNodeId(settings));
+            logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
             return;
         }
         final NodePath[] nodePaths = new NodePath[environment.dataWithClusterFiles().length];
         final Lock[] locks = new Lock[nodePaths.length];
         boolean success = false;
+
+        // trace logger to debug issues before the default node name is derived from the node id
+        ESLogger startupTraceLogger = Loggers.getLogger(getClass(), settings);
+
         try {
             sharedDataPath = environment.sharedDataFile();
             int nodeLockId = -1;
@@ -207,7 +208,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
                 Path dataDir = environment.dataFiles()[dirIndex];
                 // TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
                 if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
-                    DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
+                    DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
                     deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
                         "Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
                     dataDir = dataDirWithClusterName;
@@ -216,20 +217,20 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
                 Files.createDirectories(dir);
 
                 try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) {
-                    logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
+                    startupTraceLogger.trace("obtaining node lock on {} ...", dir.toAbsolutePath());
                     try {
                         locks[dirIndex] = luceneDir.obtainLock(NODE_LOCK_FILENAME);
                         nodePaths[dirIndex] = new NodePath(dir);
                         nodeLockId = possibleLockId;
                     } catch (LockObtainFailedException ex) {
-                        logger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
+                        startupTraceLogger.trace("failed to obtain node lock on {}", dir.toAbsolutePath());
                         // release all the ones that were obtained up until now
                         releaseAndNullLocks(locks);
                         break;
                     }
                 } catch (IOException e) {
-                    logger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
+                    startupTraceLogger.trace("failed to obtain node lock on {}", e, dir.toAbsolutePath());
                     lastException = new IOException("failed to obtain lock on " + dir.toAbsolutePath(), e);
                     // release all the ones that were obtained up until now
                     releaseAndNullLocks(locks);
@@ -246,6 +247,8 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
                 throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
                     + Arrays.toString(environment.dataWithClusterFiles()), lastException);
             }
+            this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
+            this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
 
             this.nodeLockId = nodeLockId;
             this.locks = locks;
@@ -258,8 +261,6 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
             maybeLogPathDetails();
             maybeLogHeapDetails();
 
-            this.nodeMetaData = loadOrCreateNodeMetaData(settings, logger, nodePaths);
-
             applySegmentInfosTrace(settings);
             assertCanWrite();
             success = true;
@@ -924,10 +925,6 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
         }
     }
 
-    Settings getSettings() { // for testing
-        return settings;
-    }
-
     /**
     * Resolve the custom path for a index's shard.
     * Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting to determine
@@ -940,7 +937,7 @@ public final class NodeEnvironment extends AbstractComponent implements Closeable
         if (customDataDir != null) {
             // This assert is because this should be caught by MetaDataCreateIndexService
             assert sharedDataPath != null;
-            if (addLockIdToCustomPath) {
+            if (ADD_NODE_LOCK_ID_TO_CUSTOM_PATH.get(indexSettings.getNodeSettings())) {
                 return sharedDataPath.resolve(customDataDir).resolve(Integer.toString(this.nodeLockId));
             } else {
                 return sharedDataPath.resolve(customDataDir);

View File

@@ -29,4 +29,5 @@ import org.elasticsearch.rest.RestRequest;
 public interface HttpServerAdapter {
 
     void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext context);
 }
+

View File

@@ -22,9 +22,6 @@ package org.elasticsearch.http;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.transport.BoundTransportAddress;
 
-/**
- *
- */
 public interface HttpServerTransport extends LifecycleComponent {
 
     BoundTransportAddress boundAddress();
@@ -34,4 +31,5 @@ public interface HttpServerTransport extends LifecycleComponent {
     HttpStats stats();
 
     void httpServerAdapter(HttpServerAdapter httpServerAdapter);
 }
+

View File

@@ -40,9 +40,9 @@ public final class HttpTransportSettings {
     public static final Setting<Integer> SETTING_CORS_MAX_AGE =
         Setting.intSetting("http.cors.max-age", 1728000, Property.NodeScope);
     public static final Setting<String> SETTING_CORS_ALLOW_METHODS =
-        new Setting<>("http.cors.allow-methods", "OPTIONS, HEAD, GET, POST, PUT, DELETE", (value) -> value, Property.NodeScope);
+        new Setting<>("http.cors.allow-methods", "OPTIONS,HEAD,GET,POST,PUT,DELETE", (value) -> value, Property.NodeScope);
     public static final Setting<String> SETTING_CORS_ALLOW_HEADERS =
-        new Setting<>("http.cors.allow-headers", "X-Requested-With, Content-Type, Content-Length", (value) -> value, Property.NodeScope);
+        new Setting<>("http.cors.allow-headers", "X-Requested-With,Content-Type,Content-Length", (value) -> value, Property.NodeScope);
     public static final Setting<Boolean> SETTING_CORS_ALLOW_CREDENTIALS =
         Setting.boolSetting("http.cors.allow-credentials", false, Property.NodeScope);
     public static final Setting<Boolean> SETTING_PIPELINING =
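
Dropping the spaces from the CORS defaults lines them up with the whitespace-trimming splitStringToSet change earlier in this commit; with the new splitter, the old and new default strings parse to the same sets. A quick equivalence check (assumes the patched Strings class on the classpath):

    import org.elasticsearch.common.Strings;

    public class CorsDefaultsDemo {
        public static void main(String[] args) {
            String oldMethods = "OPTIONS, HEAD, GET, POST, PUT, DELETE";
            String newMethods = "OPTIONS,HEAD,GET,POST,PUT,DELETE";
            // the trimming splitter parses both defaults identically
            System.out.println(Strings.splitStringToSet(oldMethods, ',')
                    .equals(Strings.splitStringToSet(newMethods, ','))); // true
        }
    }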

Some files were not shown because too many files have changed in this diff.