Merge branch 'master' into docs/add_console_to_search

Isabel Drost-Fromm 2016-07-25 11:54:26 +02:00
commit 00a8516780
2269 changed files with 56947 additions and 33899 deletions

View File

@ -3,7 +3,11 @@ GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
in your new issue. Note that whether you're filing a bug report or a
feature request, ensure that your submission is for an
[OS that we support](https://www.elastic.co/support/matrix#show_os).
Bug reports on an OS that we do not support or feature requests
specific to an OS that we do not support will be closed.
-->
<!--

Vagrantfile
View File

@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
# debian and it works fine.
config.vm.define "debian-8" do |config|
config.vm.box = "elastic/debian-8-x86_64"
deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
end
config.vm.define "centos-6" do |config|
config.vm.box = "elastic/centos-6-x86_64"
@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/oraclelinux-7-x86_64"
rpm_common config
end
config.vm.define "fedora-22" do |config|
config.vm.box = "elastic/fedora-22-x86_64"
config.vm.define "fedora-24" do |config|
config.vm.box = "elastic/fedora-24-x86_64"
dnf_common config
end
config.vm.define "opensuse-13" do |config|

View File

@ -31,7 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.gateway.GatewayAllocator;
@ -102,7 +102,7 @@ public final class Allocators {
}
public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER,
return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER,
DiscoveryNode.Role.DATA), Version.CURRENT);
}
}

View File

@ -173,6 +173,12 @@ subprojects {
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
// for transport client
"org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
"org.elasticsearch.plugin:transport-netty4-client:${version}": ':modules:transport-netty4',
"org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
"org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
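
These `*-client` substitutions let builds inside the repo resolve the module client artifacts from source. An external consumer would declare the same coordinates directly; a hypothetical sketch, assuming a `version` property and the artifact names from the map above:

dependencies {
    // client-side jars of the modules listed in the substitution map
    compile "org.elasticsearch.plugin:transport-netty4-client:${version}"
    compile "org.elasticsearch.plugin:reindex-client:${version}"
    compile "org.elasticsearch.plugin:percolator-client:${version}"
}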

View File

@ -19,7 +19,6 @@
package org.elasticsearch.gradle
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
@ -35,6 +34,7 @@ import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
@ -297,6 +297,10 @@ class BuildPlugin implements Plugin<Project> {
url "http://s3.amazonaws.com/download.elasticsearch.org/lucenesnapshots/${revision}"
}
}
repos.maven {
name 'netty-snapshots'
url "http://s3.amazonaws.com/download.elasticsearch.org/nettysnapshots/20160722"
}
}
/** Returns a closure which can be used with a MavenPom for removing transitive dependencies. */
@ -344,7 +348,7 @@ class BuildPlugin implements Plugin<Project> {
/** Configures generation of maven poms. */
public static void configurePomGeneration(Project project) {
project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded {
project.publishing {
publications {
all { MavenPublication publication -> // we only deal with maven

View File

@ -131,8 +131,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
private void response(Snippet response) {
current.println(" - response_body: |")
response.contents.eachLine { current.println(" $it") }
current.println(" - match: ")
current.println(" \$body: ")
response.contents.eachLine { current.println(" $it") }
}
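
A quick standalone sketch (not part of the commit) of what the rewritten response() emits: a documented response now becomes a `match` assertion on `$body` instead of a raw `response_body` block. Indentation here is illustrative:

// Groovy sketch mirroring the println calls in response() above
StringWriter out = new StringWriter()
PrintWriter current = new PrintWriter(out)
String contents = '{"acknowledged": true}'   // a sample console response snippet
current.println("  - match: ")
current.println("      \$body: ")
contents.eachLine { current.println("        $it") }
current.flush()
assert out.toString().contains('- match:')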
void emitDo(String method, String pathAndQuery,
@ -183,13 +184,6 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println('---')
current.println("setup:")
body(setup, true)
// always wait for yellow before anything is executed
current.println(
" - do:\n" +
" raw:\n" +
" method: GET\n" +
" path: \"_cluster/health\"\n" +
" wait_for_status: \"yellow\"")
}
private void body(Snippet snippet, boolean inSetup) {

View File

@ -18,14 +18,23 @@
*/
package org.elasticsearch.gradle.plugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern
/**
* Encapsulates build configuration for an Elasticsearch plugin.
*/
@ -38,19 +47,35 @@ public class PluginBuildPlugin extends BuildPlugin {
// this afterEvaluate must happen before the afterEvaluate added by integTest creation,
// so that the file name resolution for installing the plugin will be setup
project.afterEvaluate {
boolean isModule = project.path.startsWith(':modules:')
String name = project.pluginProperties.extension.name
project.jar.baseName = name
project.bundlePlugin.baseName = name
if (project.pluginProperties.extension.hasClientJar) {
// for plugins which work with the transport client, we copy the jar
// file to a new name, copy the nebula generated pom to the same name,
// and generate a different pom for the zip
project.signArchives.enabled = false
addJarPomGeneration(project)
addClientJarTask(project)
if (isModule == false) {
addZipPomGeneration(project)
}
} else {
// no client plugin, so use the pom file from nebula, without jar, for the zip
project.ext.set("nebulaPublish.maven.jar", false)
}
project.integTest.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
if (project.path.startsWith(':modules:')) {
if (isModule) {
project.integTest.clusterConfig.module(project)
project.tasks.run.clusterConfig.module(project)
} else {
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
addPomGeneration(project)
project.integTest.clusterConfig.plugin(project.path)
project.tasks.run.clusterConfig.plugin(project.path)
addZipPomGeneration(project)
}
project.namingConventions {
@ -60,6 +85,7 @@ public class PluginBuildPlugin extends BuildPlugin {
}
createIntegTestTask(project)
createBundleTask(project)
project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime'))
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
}
@ -118,40 +144,93 @@ public class PluginBuildPlugin extends BuildPlugin {
}
project.assemble.dependsOn(bundle)
// remove jar from the archives (things that will be published), and set it to the zip
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.artifacts.add('archives', bundle)
// also make the zip the default artifact (used when depending on this project)
project.configurations.getByName('default').extendsFrom = []
project.artifacts.add('default', bundle)
// also make the zip available as a configuration (used when depending on this project)
project.configurations.create('zip')
project.artifacts.add('zip', bundle)
}
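
With the zip exposed as its own configuration, other projects can depend on the bundled plugin explicitly instead of inheriting it via 'default'. A hypothetical consumer (the plugin path is illustrative):

dependencies {
    // resolves to the plugin's bundled zip rather than its jar
    testRuntime project(path: ':plugins:analysis-icu', configuration: 'zip')
}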
/**
* Adds the plugin jar and zip as publications.
*/
protected static void addPomGeneration(Project project) {
project.plugins.apply(MavenBasePublishPlugin.class)
project.plugins.apply(MavenScmPlugin.class)
/** Adds a task to move jar and associated files to a "-client" name. */
protected static void addClientJarTask(Project project) {
Task clientJar = project.tasks.create('clientJar')
clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
clientJar.doFirst {
Path jarFile = project.jar.outputs.files.singleFile.toPath()
String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)
String pomFileName = jarFile.fileName.toString().replace('.jar', '.pom')
String clientPomFileName = clientFileName.replace('.jar', '.pom')
Files.copy(jarFile.resolveSibling(pomFileName), jarFile.resolveSibling(clientPomFileName),
StandardCopyOption.REPLACE_EXISTING)
String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
StandardCopyOption.REPLACE_EXISTING)
String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
project.assemble.dependsOn(clientJar)
}
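
The renames above only splice "client-" in front of the version, so each artifact keeps its classifier suffix. A worked example with an illustrative plugin name:

String version = '5.0.0-alpha5'
String jarName = "percolator-${version}.jar"
String clientJarName = jarName.replace(version, "client-${version}")
assert clientJarName == 'percolator-client-5.0.0-alpha5.jar'
assert clientJarName.replace('.jar', '-sources.jar') == "percolator-client-${version}-sources.jar"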
static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
/** Derive the repository URL from the git origin. */
protected static String urlFromOrigin(String origin) {
if (origin.startsWith('https')) {
return origin
}
Matcher matcher = GIT_PATTERN.matcher(origin)
if (matcher.matches()) {
return "https://${matcher.group(1)}/${matcher.group(2)}"
} else {
return origin // best effort; the url doesn't really matter, it is just required by maven central
}
}
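
GIT_PATTERN splits an SSH-style origin into host and repository path, which urlFromOrigin reassembles into an https URL; https origins are returned untouched. A self-contained check of the conversion:

import java.util.regex.Matcher
import java.util.regex.Pattern

// same pattern as GIT_PATTERN above
Pattern gitPattern = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
Matcher matcher = gitPattern.matcher('git@github.com:elastic/elasticsearch.git')
assert matcher.matches()
assert "https://${matcher.group(1)}/${matcher.group(2)}" == 'https://github.com/elastic/elasticsearch'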
/** Adds nebula publishing task to generate a pom file for the plugin. */
protected static void addJarPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)
project.publishing {
publications {
nebula {
artifact project.bundlePlugin
pom.withXml {
// overwrite the name/description in the pom nebula set up
Node root = asNode()
for (Node node : root.children()) {
if (node.name() == 'name') {
node.setValue(project.pluginProperties.extension.name)
} else if (node.name() == 'description') {
node.setValue(project.pluginProperties.extension.description)
}
}
jar(MavenPublication) {
from project.components.java
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}
/** Adds a task to generate a pom file for the plugin zip distribution. */
protected void addZipPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)
project.publishing {
publications {
zip(MavenPublication) {
artifact project.bundlePlugin
pom.packaging = 'pom'
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}
}

View File

@ -39,6 +39,10 @@ class PluginPropertiesExtension {
@Input
String classname
/** Indicates whether the plugin jar should be made available for the transport client. */
@Input
boolean hasClientJar = false
PluginPropertiesExtension(Project project) {
name = project.name
version = project.version
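
A plugin opts in through its esplugin extension; a minimal sketch with illustrative coordinates (only hasClientJar comes from this change):

esplugin {
    name 'my-plugin'
    description 'An example plugin'
    classname 'org.example.MyPlugin'
    hasClientJar = true   // also publish a my-plugin-client jar for transport client users
}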

View File

@ -20,12 +20,15 @@ package org.elasticsearch.gradle.test
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {
private final Project project
@Input
String distribution = 'integ-test-zip'
@ -77,6 +80,10 @@ class ClusterConfiguration {
return tmpFile.exists()
}
public ClusterConfiguration(Project project) {
this.project = project
}
Map<String, String> systemProperties = new HashMap<>()
Map<String, String> settings = new HashMap<>()
@ -84,7 +91,7 @@ class ClusterConfiguration {
// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()
LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()
List<Project> modules = new ArrayList<>()
@ -101,13 +108,9 @@ class ClusterConfiguration {
}
@Input
void plugin(String name, FileCollection file) {
plugins.put(name, file)
}
@Input
void plugin(String name, Project pluginProject) {
plugins.put(name, pluginProject)
void plugin(String path) {
Project pluginProject = project.project(path)
plugins.put(pluginProject.name, pluginProject)
}
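
Build scripts now pass a project path and let ClusterConfiguration look the project up itself. A hedged sketch of the call site after this change (plugin path illustrative):

integTest {
    cluster {
        plugin ':plugins:analysis-icu'   // was: plugin(name, project) or plugin(name, files)
    }
}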
/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */

View File

@ -167,7 +167,7 @@ class ClusterFormationTasks {
}
// install plugins
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
}
@ -326,38 +326,34 @@ class ClusterFormationTasks {
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
List<FileCollection> pluginFiles = []
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
FileCollection pluginZip
if (plugin.getValue() instanceof Project) {
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot use project ${pluginProject.path} which is not an esplugin")
}
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, pluginProject)
setup.dependsOn(pluginProject.tasks.bundlePlugin)
pluginZip = configuration
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
// also allow rest tests to use the rest spec from the plugin
Copy copyRestSpec = null
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
} else {
pluginZip = plugin.getValue()
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot use project ${pluginProject.path} which is not an esplugin")
}
pluginFiles.add(pluginZip)
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
setup.dependsOn(pluginProject.tasks.bundlePlugin)
// also allow rest tests to use the rest spec from the plugin
String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
pluginFiles.add(configuration)
}
copyPlugins.into(node.pluginsTmpDir)
@ -379,15 +375,10 @@ class ClusterFormationTasks {
return installModule
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
FileCollection pluginZip
if (plugin instanceof Project) {
pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
} else {
pluginZip = plugin
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
FileCollection pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
return configureExecTask(name, project, setup, node, args)
}
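
The "${-> ...}" idiom deserves a note: a closure embedded in a GString is only evaluated when the string is rendered, so the plugin file name is read at execution time rather than configuration time. A tiny self-contained illustration:

def dir = null
String eager = "plugin: ${dir}"   // interpolated immediately -> "plugin: null"
def lazy = "plugin: ${-> dir}"    // closure, evaluated on each toString()
dir = 'analysis-icu.zip'
assert eager == 'plugin: null'
assert lazy.toString() == 'plugin: analysis-icu.zip'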

View File

@ -32,7 +32,7 @@ import org.gradle.util.ConfigureUtil
*/
public class RestIntegTestTask extends RandomizedTestingTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration()
ClusterConfiguration clusterConfig
/** Flag indicating whether the rest tests in the rest spec should be run. */
@Input
@ -44,6 +44,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
dependsOn(project.testClasses)
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = new ClusterConfiguration(project)
// start with the common test configuration
configure(BuildPlugin.commonTestConfig(project))

View File

@ -7,11 +7,15 @@ import org.gradle.util.ConfigureUtil
public class RunTask extends DefaultTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
ClusterConfiguration clusterConfig
public RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
clusterConfig = new ClusterConfiguration(project)
clusterConfig.httpPort = 9200
clusterConfig.transportPort = 9300
clusterConfig.daemonize = false
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}

View File

@ -39,6 +39,27 @@
<module name="EqualsHashCode" />
<!-- Checks that the order of modifiers conforms to the suggestions in the
Java Language specification, sections 8.1.1, 8.3.1 and 8.4.3. It is not that
the standard is perfect, but having a consistent order makes the code more
readable and no other order is compellingly better than the standard.
The correct order is:
public
protected
private
abstract
static
final
transient
volatile
synchronized
native
strictfp
-->
<module name="ModifierOrder" />
<module name="RedundantModifier" />
<!-- We don't use Java's builtin serialization and we suppress all warnings
about it. The flip side of that coin is that we shouldn't _try_ to use
it. We can't outright ban it with ForbiddenApis because it complains about

View File

@ -233,7 +233,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]node[/\\]NodeClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />
@ -266,7 +265,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataMappingService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataUpdateSettingsService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]RepositoriesMetaData.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]node[/\\]DiscoveryNodes.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexRoutingTable.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexShardRoutingTable.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]OperationRouting.java" checks="LineLength" />
@ -341,12 +339,9 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoveryService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoverySettings.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]local[/\\]LocalDiscovery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]NodeJoinController.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscovery.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]elect[/\\]ElectMasterService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]FaultDetection.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]MasterFaultDetection.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]NodesFaultDetection.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]membership[/\\]MembershipAction.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]ZenPing.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PendingClusterStatesQueue.java" checks="LineLength" />
@ -357,7 +352,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]LocalAllocateDangledIndices.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]MetaDataStateFormat.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]PrimaryShardAllocator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]TransportNodesListGatewayMetaState.java" checks="LineLength" />
@ -561,7 +555,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
@ -581,10 +574,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]InternalRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]date[/\\]InternalDateRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]ipv4[/\\]InternalIPv4Range.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedBytesHashSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedMapSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedNumericSamplerAggregator.java" checks="LineLength" />
@ -592,46 +582,34 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]InternalSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantLongTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantStringTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]UnmappedSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]GND.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]NXYSignificanceHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]PercentageScore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]ScriptHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]SignificanceHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]AbstractTermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]GlobalOrdinalsStringTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalOrder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]UnmappedTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]support[/\\]IncludeExclude.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]ValuesSourceMetricsAggregationBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]CardinalityAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]GeoBoundsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]InternalGeoBounds.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]AbstractTDigestPercentilesAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentileRanksAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentilesAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]scripted[/\\]InternalScriptedMetric.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]scripted[/\\]ScriptedMetricAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]stats[/\\]extended[/\\]ExtendedStatsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]tophits[/\\]TopHitsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]bucketscript[/\\]BucketScriptPipelineAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]derivative[/\\]InternalDerivative.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
@ -762,7 +740,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]ShardsAllocatorModuleIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]SimpleAllocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterIndexHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterStateHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]AutoExpandReplicasTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]DateMathExpressionResolverTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]HumanReadableIndexSettingsTests.java" checks="LineLength" />
@ -848,7 +825,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]BlockingClusterStatePublishResponseHandlerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoveryWithServiceDisruptionsIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ZenUnicastDiscoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]NodeJoinControllerTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryUnitTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateActionTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]document[/\\]DocumentActionsIT.java" checks="LineLength" />
@ -869,7 +845,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]netty[/\\]NettyHttpServerPipeliningTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModuleTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexWithShadowReplicasIT.java" checks="LineLength" />
@ -1060,7 +1035,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificanceHeuristicTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AvgIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]SumIT.java" checks="LineLength" />
@ -1132,16 +1106,10 @@
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyPlugin.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentileRanksTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentilesTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HistogramTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IPv4RangeTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndexLookupTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndicesRequestTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]LongTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinDocCountTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinTests.java" checks="LineLength" />
@ -1150,8 +1118,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SearchFieldsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SimpleSortTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]StringTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentileRanksTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentilesTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />
@ -1168,7 +1134,6 @@
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]RestPercolateAction.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
<suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorRequestTests.java" checks="LineLength" />
@ -1180,10 +1145,10 @@
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]JapaneseStopTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]KuromojiAnalysisTests.java" checks="LineLength" />
<suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureTestCase.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureMinimumMasterNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureSimpleTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureTwoStartedNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure-classic[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]AbstractAzureTestCase.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure-classic[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureMinimumMasterNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure-classic[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureSimpleTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-azure-classic[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]azure[/\\]AzureTwoStartedNodesTests.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]aws[/\\]AbstractAwsTestCase.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-ec2[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ec2[/\\]AmazonEC2Mock.java" checks="LineLength" />
<suppress files="plugins[/\\]discovery-gce[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]gce[/\\]GceNetworkTests.java" checks="LineLength" />
@ -1208,10 +1173,6 @@
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-size[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeFieldMapper.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeFieldMapperUpgradeTests.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeMappingIT.java" checks="LineLength" />
<suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeMappingTests.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]blobstore[/\\]AzureBlobContainer.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]blobstore[/\\]AzureBlobStore.java" checks="LineLength" />
<suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]storage[/\\]AzureStorageServiceImpl.java" checks="LineLength" />
@ -1264,26 +1225,8 @@
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanOrEqualToParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParseContext.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanEqualToAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LengthAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanOrEqualToAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]MatchAssertion.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]spec[/\\]RestApiParser.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]support[/\\]FileUtils.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]FileUtilsTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]JsonPathTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]RestTestParserTests.java" checks="LineLength" />
<suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]test[/\\]InternalTestClusterTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliTool.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]RestGetSettingsAction.java" checks="LineLength" />

View File

@ -36,7 +36,7 @@ public class NamingConventionsCheckBadClasses {
public void testDummy() {}
}
public static abstract class DummyAbstractTests extends UnitTestCase {
public abstract static class DummyAbstractTests extends UnitTestCase {
}
public interface DummyInterfaceTests {

View File

@ -1,4 +1,4 @@
elasticsearch = 5.0.0-alpha4
elasticsearch = 5.0.0-alpha5
lucene = 6.1.0
# optional dependencies
@ -13,7 +13,7 @@ jna = 4.2.2
randomizedrunner = 2.3.2
junit = 4.11
httpclient = 4.5.2
httpcore = 4.4.4
httpcore = 4.4.5
commonslogging = 1.1.3
commonscodec = 1.10
hamcrest = 1.3

View File

@ -26,9 +26,13 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
dependencies {
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
compile "org.apache.httpcomponents:httpasyncclient:4.1.2"
compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}"
compile "commons-codec:commons-codec:${versions.commonscodec}"
compile "commons-logging:commons-logging:${versions.commonslogging}"
@ -54,6 +58,11 @@ forbiddenApisTest {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
}
dependencyLicenses {
mapping from: /http.*/, to: 'httpclient'
mapping from: /commons-.*/, to: 'commons'
}
//JarHell is part of es core, which we don't want to pull in
jarHell.enabled=false

View File

@ -1,6 +0,0 @@
Apache Commons Logging
Copyright 2003-2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -0,0 +1 @@
95aa3e6fb520191a0970a73cf09f62948ee614be

View File

@ -1 +0,0 @@
b31526a230871fbe285fbcbe2813f9c0839ae9b0

View File

@ -0,0 +1 @@
e7501a1b34325abb00d17dde96150604a0658b54

View File

@ -0,0 +1 @@
f4be009e7505f6ceddf21e7960c759f413f15056

View File

@ -0,0 +1,111 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpException;
import org.apache.http.HttpResponse;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.nio.entity.ContentBufferEntity;
import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer;
import org.apache.http.nio.util.ByteBufferAllocator;
import org.apache.http.nio.util.HeapByteBufferAllocator;
import org.apache.http.nio.util.SimpleInputBuffer;
import org.apache.http.protocol.HttpContext;
import java.io.IOException;
/**
* Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole
* response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
* Limits the size of responses that can be read to {@link #DEFAULT_BUFFER_LIMIT} by default; this limit is configurable.
* Throws an exception in case the entity is longer than the configured buffer limit.
*/
public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer<HttpResponse> {
//default buffer limit is 10MB
public static final int DEFAULT_BUFFER_LIMIT = 10 * 1024 * 1024;
private final int bufferLimit;
private volatile HttpResponse response;
private volatile SimpleInputBuffer buf;
/**
* Creates a new instance of this consumer with a buffer limit of {@link #DEFAULT_BUFFER_LIMIT}
*/
public HeapBufferedAsyncResponseConsumer() {
this.bufferLimit = DEFAULT_BUFFER_LIMIT;
}
/**
* Creates a new instance of this consumer with the provided buffer limit
*/
public HeapBufferedAsyncResponseConsumer(int bufferLimit) {
if (bufferLimit <= 0) {
throw new IllegalArgumentException("bufferLimit must be greater than 0");
}
this.bufferLimit = bufferLimit;
}
@Override
protected void onResponseReceived(HttpResponse response) throws HttpException, IOException {
this.response = response;
}
@Override
protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException {
long len = entity.getContentLength();
if (len > bufferLimit) {
throw new ContentTooLongException("entity content is too long [" + len +
"] for the configured buffer limit [" + bufferLimit + "]");
}
if (len < 0) {
len = 4096;
}
this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator());
this.response.setEntity(new ContentBufferEntity(entity, this.buf));
}
/**
* Returns the instance of {@link ByteBufferAllocator} to use for content buffering.
* Allows plugging in any {@link ByteBufferAllocator} implementation.
*/
protected ByteBufferAllocator getByteBufferAllocator() {
return HeapByteBufferAllocator.INSTANCE;
}
@Override
protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException {
this.buf.consumeContent(decoder);
}
@Override
protected HttpResponse buildResult(HttpContext context) throws Exception {
return response;
}
@Override
protected void releaseResources() {
response = null;
}
}
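A minimal usage sketch of the consumer above, assuming a node listening on localhost:9200 (host, endpoint and the 100MB limit are illustrative; RestClient.builder and the performRequest overload that accepts a response consumer come from the RestClient changes later in this diff):

import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.HeapBufferedAsyncResponseConsumer;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import java.util.Collections;

public class BufferLimitExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            //raise the limit to 100MB for a response expected to exceed the 10MB default
            HttpAsyncResponseConsumer<HttpResponse> consumer =
                    new HeapBufferedAsyncResponseConsumer(100 * 1024 * 1024);
            Response response = client.performRequest("GET", "/_search",
                    Collections.<String, String>emptyMap(), (HttpEntity) null, consumer);
            System.out.println(response.getStatusLine());
        }
    }
}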

View File

@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {
final static String METHOD_NAME = HttpDelete.METHOD_NAME;
static final String METHOD_NAME = HttpDelete.METHOD_NAME;
HttpDeleteWithEntity(final URI uri) {
setURI(uri);

View File

@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
final static String METHOD_NAME = HttpGet.METHOD_NAME;
static final String METHOD_NAME = HttpGet.METHOD_NAME;
HttpGetWithEntity(final URI uri) {
setURI(uri);

View File

@ -26,6 +26,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.entity.ContentType;
@ -55,7 +56,7 @@ final class RequestLogger {
*/
static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() +
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
"] returned [" + httpResponse.getStatusLine() + "]");
}
if (tracer.isTraceEnabled()) {
@ -80,9 +81,11 @@ final class RequestLogger {
/**
* Logs a request that failed
*/
static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] failed", e);
if (logger.isTraceEnabled()) {
static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, Exception e) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
}
if (tracer.isTraceEnabled()) {
String traceRequest;
try {
traceRequest = buildTraceRequest(request, host);
@ -98,7 +101,7 @@ final class RequestLogger {
* Creates curl output for given request
*/
static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException {
String requestLine = "curl -iX " + request.getMethod() + " '" + host + request.getRequestLine().getUri() + "'";
String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'";
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
if (enclosingRequest.getEntity() != null) {
@ -143,4 +146,11 @@ final class RequestLogger {
}
return responseLine;
}
private static String getUri(RequestLine requestLine) {
if (requestLine.getUri().charAt(0) != '/') {
return "/" + requestLine.getUri();
}
return requestLine.getUri();
}
}

View File

@ -22,26 +22,23 @@ package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import java.io.Closeable;
import java.io.IOException;
import java.util.Objects;
/**
* Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with
* Holds an elasticsearch response. It wraps the {@link HttpResponse} returned and associates it with
* its corresponding {@link RequestLine} and {@link HttpHost}.
* It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool.
*/
public class Response implements Closeable {
public final class Response {
private final RequestLine requestLine;
private final HttpHost host;
private final CloseableHttpResponse response;
private final HttpResponse response;
Response(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) {
Response(RequestLine requestLine, HttpHost host, HttpResponse response) {
Objects.requireNonNull(requestLine, "requestLine cannot be null");
Objects.requireNonNull(host, "node cannot be null");
Objects.requireNonNull(response, "response cannot be null");
@ -107,9 +104,4 @@ public class Response implements Closeable {
", response=" + response.getStatusLine() +
'}';
}
@Override
public void close() throws IOException {
this.response.close();
}
}
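Since Response no longer implements Closeable, callers simply read the already-buffered entity and let the client manage the underlying connection. A sketch, assuming a node at localhost:9200 and that Response keeps its getEntity() accessor (both illustrative here):

import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ResponseBodyExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            Response response = client.performRequest("GET", "/");
            //the entity is fully buffered in heap by HeapBufferedAsyncResponseConsumer,
            //so consuming it does not hold on to a pooled connection
            String body = EntityUtils.toString(response.getEntity());
            System.out.println(body);
        }
    }
}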

View File

@ -23,44 +23,26 @@ import java.io.IOException;
/**
* Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error.
* Note that the response body gets passed in as a string and read eagerly, which means that the Response object
* is expected to be closed and available only to read metadata like status line, request line, response headers.
* Holds the response that was returned.
*/
public class ResponseException extends IOException {
public final class ResponseException extends IOException {
private Response response;
private final String responseBody;
ResponseException(Response response, String responseBody) throws IOException {
super(buildMessage(response,responseBody));
ResponseException(Response response) throws IOException {
super(buildMessage(response));
this.response = response;
this.responseBody = responseBody;
}
private static String buildMessage(Response response, String responseBody) {
String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
private static String buildMessage(Response response) {
return response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri()
+ ": " + response.getStatusLine().toString();
if (responseBody != null) {
message += "\n" + responseBody;
}
return message;
}
/**
* Returns the {@link Response} that caused this exception to be thrown.
* Expected to be used only to read metadata like status line, request line, response headers. The response body should
* be retrieved using {@link #getResponseBody()}
*/
public Response getResponse() {
return response;
}
/**
* Returns the response body as a string or null if there wasn't any.
* The body is eagerly consumed when an ResponseException gets created, and its corresponding Response
* gets closed straightaway so this method is the only way to get back the response body that was returned.
*/
public String getResponseBody() {
return responseBody;
}
}
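With the body no longer eagerly drained into a string, error handling reduces to inspecting the wrapped Response. A sketch under the same assumptions as above (host and index name are hypothetical):

import org.apache.http.HttpHost;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;

public class ErrorHandlingExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            try {
                client.performRequest("GET", "/missing-index");
            } catch (ResponseException e) {
                //status line, request line and headers are all available from the wrapped response
                int statusCode = e.getResponse().getStatusLine().getStatusCode();
                System.out.println("request failed with status " + statusCode);
            }
        }
    }
}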

View File

@ -0,0 +1,40 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
/**
* Listener to be provided when calling async performRequest methods provided by {@link RestClient}.
* Those methods that accept a listener return immediately, execute asynchronously, and notify
* the listener once the request yields a response or fails with an exception.
*/
public interface ResponseListener {
/**
* Method invoked if the request yielded a successful response
*/
void onSuccess(Response response);
/**
* Method invoked if the request failed. There are two main categories of failures: connection failures (usually
* {@link java.io.IOException}s), or responses that were treated as errors based on their error response code
* ({@link ResponseException}s).
*/
void onFailure(Exception exception);
}
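A sketch of implementing this listener against the async performRequest variant added later in this diff (host and endpoint illustrative; the latch is only there so the sample does not close the client before the callback fires):

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
import org.elasticsearch.client.RestClient;
import java.util.concurrent.CountDownLatch;

public class AsyncExample {
    public static void main(String[] args) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            //returns immediately, the listener is notified on completion or failure
            client.performRequest("GET", "/", new ResponseListener() {
                @Override
                public void onSuccess(Response response) {
                    System.out.println("got " + response.getStatusLine());
                    latch.countDown();
                }
                @Override
                public void onFailure(Exception exception) {
                    exception.printStackTrace();
                    latch.countDown();
                }
            });
            latch.await();
        }
    }
}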

View File

@ -20,14 +20,12 @@ package org.elasticsearch.client;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
@ -37,22 +35,22 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.util.EntityUtils;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.nio.client.methods.HttpAsyncMethods;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import java.io.Closeable;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -60,30 +58,31 @@ import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
/**
* Client that connects to an elasticsearch cluster through http.
* Must be created using {@link Builder}, which allows to set all the different options or just rely on defaults.
* Must be created using {@link RestClientBuilder}, which allows setting all the different options or just relying on defaults.
* The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later
* by calling {@link #setHosts(HttpHost...)}.
* The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows sending a request to the cluster. When
* sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and
* retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously
* failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that
* deserve a retry) are retried till one responds or none of them does, in which case an {@link IOException} will be thrown.
* deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
*
* Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format.
*/
public final class RestClient implements Closeable {
private static final Log logger = LogFactory.getLog(RestClient.class);
public static ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8);
private final CloseableHttpClient client;
//we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy
//when we create the HttpClient instance on our own as there would be two different ways to set the default headers.
private final CloseableHttpAsyncClient client;
//we don't rely on default headers supported by HttpAsyncClient as those cannot be replaced
private final Header[] defaultHeaders;
private final long maxRetryTimeoutMillis;
private final AtomicInteger lastHostIndex = new AtomicInteger(0);
@ -91,7 +90,7 @@ public final class RestClient implements Closeable {
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
RestClient(CloseableHttpAsyncClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
HttpHost[] hosts, FailureListener failureListener) {
this.client = client;
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
@ -100,6 +99,13 @@ public final class RestClient implements Closeable {
setHosts(hosts);
}
/**
* Returns a new {@link RestClientBuilder} to help with {@link RestClient} creation.
*/
public static RestClientBuilder builder(HttpHost... hosts) {
return new RestClientBuilder(hosts);
}
/**
* Replaces the hosts that the client communicates with.
* @see HttpHost
@ -118,11 +124,44 @@ public final class RestClient implements Closeable {
}
/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
* amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
* the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
* till one responds or none of them does, in which case an {@link IOException} will be thrown.
* Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters
* and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
return performRequest(method, endpoint, params, (HttpEntity)null, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to and waits for the corresponding response
* to be returned. Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, Header...)}
* which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance; a {@link HeapBufferedAsyncResponseConsumer}
* will be used to consume the response body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
@ -136,72 +175,183 @@ public final class RestClient implements Closeable {
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, Header... headers) throws IOException {
HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
return performRequest(method, endpoint, params, entity, responseConsumer, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Blocks until the request is completed and returns
* its response or fails by throwing an exception. Selects a host out of the provided ones in a round-robin fashion. Failing hosts
* are marked dead and retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times
* they previously failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead
* nodes that deserve a retry) are retried until one responds or none of them does, in which case an {@link IOException} will be thrown.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
* body gets streamed from a non-blocking HTTP connection on the client side.
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
Header... headers) throws IOException {
SyncResponseListener listener = new SyncResponseListener(maxRetryTimeoutMillis);
performRequest(method, endpoint, params, entity, responseConsumer, listener, headers);
return listener.get();
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
* {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without parameters and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, ResponseListener responseListener, Header... headers) {
performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure. Shortcut to
* {@link #performRequest(String, String, Map, HttpEntity, ResponseListener, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
ResponseListener responseListener, Header... headers) {
performRequest(method, endpoint, params, null, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. Doesn't wait for the response, instead
* the provided {@link ResponseListener} will be notified upon completion or failure.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, HttpAsyncResponseConsumer, ResponseListener, Header...)}
* which doesn't require specifying an {@link HttpAsyncResponseConsumer} instance; a {@link HeapBufferedAsyncResponseConsumer}
* will be used to consume the response body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, ResponseListener responseListener, Header... headers) {
HttpAsyncResponseConsumer<HttpResponse> responseConsumer = new HeapBufferedAsyncResponseConsumer();
performRequest(method, endpoint, params, entity, responseConsumer, responseListener, headers);
}
/**
* Sends a request to the elasticsearch cluster that the client points to. The request is executed asynchronously
* and the provided {@link ResponseListener} gets notified upon request completion or failure.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
* amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures,
* the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried
* until one responds or none of them does, in which case an {@link IOException} will be thrown.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param entity the body of the request, null if not applicable
* @param responseConsumer the {@link HttpAsyncResponseConsumer} callback. Controls how the response
* body gets streamed from a non-blocking HTTP connection on the client side.
* @param responseListener the {@link ResponseListener} to notify when the request is completed or fails
* @param headers the optional request headers
*/
public void performRequest(String method, String endpoint, Map<String, String> params,
HttpEntity entity, HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
ResponseListener responseListener, Header... headers) {
URI uri = buildUri(endpoint, params);
HttpRequestBase request = createHttpRequest(method, uri, entity);
setHeaders(request, headers);
//we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt
long retryTimeoutMillis = Math.round(this.maxRetryTimeoutMillis / (float)100 * 98);
IOException lastSeenException = null;
FailureTrackingResponseListener failureTrackingResponseListener = new FailureTrackingResponseListener(responseListener);
long startTime = System.nanoTime();
for (HttpHost host : nextHost()) {
if (lastSeenException != null) {
//in case we are retrying, check whether maxRetryTimeout has been reached, in which case an exception will be thrown
long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
long timeout = retryTimeoutMillis - timeElapsedMillis;
if (timeout <= 0) {
IOException retryTimeoutException = new IOException(
"request retries exceeded max retry timeout [" + retryTimeoutMillis + "]");
retryTimeoutException.addSuppressed(lastSeenException);
throw retryTimeoutException;
performRequest(startTime, nextHost().iterator(), request, responseConsumer, failureTrackingResponseListener);
}
private void performRequest(final long startTime, final Iterator<HttpHost> hosts, final HttpRequestBase request,
final HttpAsyncResponseConsumer<HttpResponse> responseConsumer,
final FailureTrackingResponseListener listener) {
final HttpHost host = hosts.next();
//we stream the request body if the entity allows for it
HttpAsyncRequestProducer requestProducer = HttpAsyncMethods.create(host, request);
client.execute(requestProducer, responseConsumer, new FutureCallback<HttpResponse>() {
@Override
public void completed(HttpResponse httpResponse) {
try {
RequestLogger.logResponse(logger, request, host, httpResponse);
int statusCode = httpResponse.getStatusLine().getStatusCode();
Response response = new Response(request.getRequestLine(), host, httpResponse);
if (isSuccessfulResponse(request.getMethod(), statusCode)) {
onResponse(host);
listener.onSuccess(response);
} else {
ResponseException responseException = new ResponseException(response);
if (isRetryStatus(statusCode)) {
//mark host dead and retry against next one
onFailure(host);
retryIfPossible(responseException, hosts, request);
} else {
//mark host alive and don't retry, as the error should be a request problem
onResponse(host);
listener.onDefinitiveFailure(responseException);
}
}
} catch(Exception e) {
listener.onDefinitiveFailure(e);
}
//also reset the request to make it reusable for the next attempt
request.reset();
}
CloseableHttpResponse httpResponse;
try {
httpResponse = client.execute(host, request);
} catch(IOException e) {
RequestLogger.logFailedRequest(logger, request, host, e);
onFailure(host);
lastSeenException = addSuppressedException(lastSeenException, e);
continue;
}
Response response = new Response(request.getRequestLine(), host, httpResponse);
int statusCode = response.getStatusLine().getStatusCode();
if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) {
RequestLogger.logResponse(logger, request, host, httpResponse);
onResponse(host);
return response;
}
RequestLogger.logResponse(logger, request, host, httpResponse);
String responseBody;
try {
if (response.getEntity() == null) {
responseBody = null;
} else {
responseBody = EntityUtils.toString(response.getEntity());
}
} finally {
response.close();
}
lastSeenException = addSuppressedException(lastSeenException, new ResponseException(response, responseBody));
switch(statusCode) {
case 502:
case 503:
case 504:
//mark host dead and retry against next one
@Override
public void failed(Exception failure) {
try {
RequestLogger.logFailedRequest(logger, request, host, failure);
onFailure(host);
break;
default:
//mark host alive and don't retry, as the error should be a request problem
onResponse(host);
throw lastSeenException;
retryIfPossible(failure, hosts, request);
} catch(Exception e) {
listener.onDefinitiveFailure(e);
}
}
}
//we get here only when we tried all nodes and they all failed
assert lastSeenException != null;
throw lastSeenException;
private void retryIfPossible(Exception exception, Iterator<HttpHost> hosts, HttpRequestBase request) {
if (hosts.hasNext()) {
//in case we are retrying, check whether maxRetryTimeout has been reached
long timeElapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime);
long timeout = maxRetryTimeoutMillis - timeElapsedMillis;
if (timeout <= 0) {
IOException retryTimeoutException = new IOException(
"request retries exceeded max retry timeout [" + maxRetryTimeoutMillis + "]");
listener.onDefinitiveFailure(retryTimeoutException);
} else {
listener.trackFailure(exception);
request.reset();
performRequest(startTime, hosts, request, responseConsumer, listener);
}
} else {
listener.onDefinitiveFailure(exception);
}
}
@Override
public void cancelled() {
listener.onDefinitiveFailure(new ExecutionException("request was cancelled", null));
}
});
}
private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
@ -216,38 +366,43 @@ public final class RestClient implements Closeable {
}
/**
* Returns an iterator of hosts to be used for a request call.
* Ideally, the first host is retrieved from the iterator and used successfully for the request.
* Otherwise, after each failure the next host should be retrieved from the iterator so that the request can be retried till
* the iterator is exhausted. The maximum total of attempts is equal to the number of hosts that are available in the iterator.
* The iterator returned will never be empty, rather an {@link IllegalStateException} in case there are no hosts.
* In case there are no healthy hosts available, or dead ones to be retried, one dead host gets returned.
* Returns an {@link Iterable} of hosts to be used for a request call.
* Ideally, the first host is retrieved from the iterable and used successfully for the request.
* Otherwise, after each failure the next host has to be retrieved from the iterator so that the request can be retried until
* there are no more hosts available to retry against. The maximum total of attempts is equal to the number of hosts in the iterable.
* The iterator returned will never be empty. In case there are no healthy hosts available, or dead ones to be retried,
* one dead host gets returned so that it can be retried.
*/
private Iterable<HttpHost> nextHost() {
Set<HttpHost> filteredHosts = new HashSet<>(hosts);
for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
filteredHosts.remove(entry.getKey());
}
}
if (filteredHosts.isEmpty()) {
//last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
@Override
public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
Collection<HttpHost> nextHosts = Collections.emptySet();
do {
Set<HttpHost> filteredHosts = new HashSet<>(hosts);
for (Map.Entry<HttpHost, DeadHostState> entry : blacklist.entrySet()) {
if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) {
filteredHosts.remove(entry.getKey());
}
});
HttpHost deadHost = sortedHosts.get(0).getKey();
logger.trace("resurrecting host [" + deadHost + "]");
return Collections.singleton(deadHost);
}
List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
return rotatedHosts;
}
if (filteredHosts.isEmpty()) {
//last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried
List<Map.Entry<HttpHost, DeadHostState>> sortedHosts = new ArrayList<>(blacklist.entrySet());
if (sortedHosts.size() > 0) {
Collections.sort(sortedHosts, new Comparator<Map.Entry<HttpHost, DeadHostState>>() {
@Override
public int compare(Map.Entry<HttpHost, DeadHostState> o1, Map.Entry<HttpHost, DeadHostState> o2) {
return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos());
}
});
HttpHost deadHost = sortedHosts.get(0).getKey();
logger.trace("resurrecting host [" + deadHost + "]");
nextHosts = Collections.singleton(deadHost);
}
} else {
List<HttpHost> rotatedHosts = new ArrayList<>(filteredHosts);
Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement());
nextHosts = rotatedHosts;
}
} while(nextHosts.isEmpty());
return nextHosts;
}
/**
@ -285,7 +440,21 @@ public final class RestClient implements Closeable {
client.close();
}
private static IOException addSuppressedException(IOException suppressedException, IOException currentException) {
private static boolean isSuccessfulResponse(String method, int statusCode) {
return statusCode < 300 || (HttpHead.METHOD_NAME.equals(method) && statusCode == 404);
}
private static boolean isRetryStatus(int statusCode) {
switch(statusCode) {
case 502:
case 503:
case 504:
return true;
}
return false;
}
private static Exception addSuppressedException(Exception suppressedException, Exception currentException) {
if (suppressedException != null) {
currentException.addSuppressed(suppressedException);
}
@ -342,121 +511,113 @@ public final class RestClient implements Closeable {
}
/**
* Returns a new {@link Builder} to help with {@link RestClient} creation.
* Listener used in any async call to wrap the provided user listener (or SyncResponseListener in sync calls).
* Allows tracking potential failures coming from the different retry attempts, returning to the original listener
* only once a response is received (successful or not to be retried) or there are no hosts left to retry against.
*/
public static Builder builder(HttpHost... hosts) {
return new Builder(hosts);
static class FailureTrackingResponseListener {
private final ResponseListener responseListener;
private volatile Exception exception;
FailureTrackingResponseListener(ResponseListener responseListener) {
this.responseListener = responseListener;
}
/**
* Notifies the caller of a response through the wrapped listener
*/
void onSuccess(Response response) {
responseListener.onSuccess(response);
}
/**
* Tracks one last definitive failure and returns to the caller by notifying the wrapped listener
*/
void onDefinitiveFailure(Exception exception) {
trackFailure(exception);
responseListener.onFailure(this.exception);
}
/**
* Tracks an exception that caused a retry, hence we should not yet return to the caller
*/
void trackFailure(Exception exception) {
this.exception = addSuppressedException(this.exception, exception);
}
}
/**
* Rest client builder. Helps creating a new {@link RestClient}.
* Listener used in all sync performRequest calls; it waits for a response or an exception up to a timeout
*/
public static final class Builder {
public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
static class SyncResponseListener implements ResponseListener {
private final CountDownLatch latch = new CountDownLatch(1);
private final AtomicReference<Response> response = new AtomicReference<>();
private final AtomicReference<Exception> exception = new AtomicReference<>();
private static final Header[] EMPTY_HEADERS = new Header[0];
private final long timeout;
private final HttpHost[] hosts;
private CloseableHttpClient httpClient;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private FailureListener failureListener;
SyncResponseListener(long timeout) {
assert timeout > 0;
this.timeout = timeout;
}
/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
*/
private Builder(HttpHost... hosts) {
if (hosts == null || hosts.length == 0) {
throw new IllegalArgumentException("no hosts provided");
@Override
public void onSuccess(Response response) {
Objects.requireNonNull(response, "response must not be null");
boolean wasResponseNull = this.response.compareAndSet(null, response);
if (wasResponseNull == false) {
throw new IllegalStateException("response is already set");
}
this.hosts = hosts;
latch.countDown();
}
@Override
public void onFailure(Exception exception) {
Objects.requireNonNull(exception, "exception must not be null");
boolean wasExceptionNull = this.exception.compareAndSet(null, exception);
if (wasExceptionNull == false) {
throw new IllegalStateException("exception is already set");
}
latch.countDown();
}
/**
* Sets the http client. A new default one will be created if not
* specified, by calling {@link #createDefaultHttpClient(Registry)})}.
*
* @see CloseableHttpClient
* Waits (up to a timeout) for some result of the request: either a response, or an exception.
*/
public Builder setHttpClient(CloseableHttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
*
* @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
*/
public Builder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
if (maxRetryTimeoutMillis <= 0) {
throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0");
Response get() throws IOException {
try {
//providing timeout is just a safety measure to prevent everlasting waits
//the different client timeouts should already do their jobs
if (latch.await(timeout, TimeUnit.MILLISECONDS) == false) {
throw new IOException("listener timeout after waiting for [" + timeout + "] ms");
}
} catch (InterruptedException e) {
throw new RuntimeException("thread waiting for the response was interrupted", e);
}
this.maxRetryTimeout = maxRetryTimeoutMillis;
return this;
}
/**
* Sets the default request headers, to be used when creating the default http client instance.
* In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be
* set to it externally during http client construction.
*/
public Builder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "default headers must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
Exception exception = this.exception.get();
Response response = this.response.get();
if (exception != null) {
if (response != null) {
IllegalStateException e = new IllegalStateException("response and exception are unexpectedly set at the same time");
e.addSuppressed(exception);
throw e;
}
//try to leave the exception untouched as much as possible, but we don't want to add a throws Exception clause everywhere
if (exception instanceof IOException) {
throw (IOException) exception;
}
if (exception instanceof RuntimeException){
throw (RuntimeException) exception;
}
throw new RuntimeException("error while performing request", exception);
}
this.defaultHeaders = defaultHeaders;
return this;
}
/**
* Sets the {@link FailureListener} to be notified for each request failure
*/
public Builder setFailureListener(FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failure listener must not be null");
this.failureListener = failureListener;
return this;
}
/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (httpClient == null) {
httpClient = createDefaultHttpClient(null);
if (response == null) {
throw new IllegalStateException("response not set and no exception caught either");
}
if (failureListener == null) {
failureListener = new FailureListener();
}
return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
}
/**
* Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
*
* @see CloseableHttpClient
*/
public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
PoolingHttpClientConnectionManager connectionManager;
if (socketFactoryRegistry == null) {
connectionManager = new PoolingHttpClientConnectionManager();
} else {
connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
}
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);
//default timeouts are all infinite
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
return response;
}
}
@ -468,7 +629,7 @@ public final class RestClient implements Closeable {
/**
* Notifies that the host provided as argument has just failed
*/
public void onFailure(HttpHost host) throws IOException {
public void onFailure(HttpHost host) {
}
}
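The FailureListener hook above can be used, for instance, to log each host as it gets marked dead; a sketch with hypothetical hosts (note that onFailure no longer declares a checked exception):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;

public class FailureListenerExample {
    public static void main(String[] args) throws Exception {
        RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200), new HttpHost("localhost", 9201))
                .setFailureListener(new RestClient.FailureListener() {
                    @Override
                    public void onFailure(HttpHost host) {
                        //called each time a host fails and gets blacklisted
                        System.err.println("host marked dead: " + host);
                    }
                })
                .build();
        client.close();
    }
}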

View File

@ -0,0 +1,179 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.SchemeIOSessionStrategy;
import java.util.Objects;
/**
* Helps create a new {@link RestClient}. Allows setting the most common http client configuration options when internally
* creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows providing an externally created
* {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed.
*/
public final class RestClientBuilder {
public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000;
public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS;
public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500;
public static final int DEFAULT_MAX_CONN_PER_ROUTE = 10;
public static final int DEFAULT_MAX_CONN_TOTAL = 30;
private static final Header[] EMPTY_HEADERS = new Header[0];
private final HttpHost[] hosts;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private RestClient.FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;
/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
*/
RestClientBuilder(HttpHost... hosts) {
if (hosts == null || hosts.length == 0) {
throw new IllegalArgumentException("no hosts provided");
}
for (HttpHost host : hosts) {
Objects.requireNonNull(host, "host cannot be null");
}
this.hosts = hosts;
}
/**
* Sets the default request headers, which will be sent along with each request
*/
public RestClientBuilder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
}
this.defaultHeaders = defaultHeaders;
return this;
}
/**
* Sets the {@link RestClient.FailureListener} to be notified for each request failure
*/
public RestClientBuilder setFailureListener(RestClient.FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failureListener must not be null");
this.failureListener = failureListener;
return this;
}
/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
*
* @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0
*/
public RestClientBuilder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) {
if (maxRetryTimeoutMillis <= 0) {
throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0");
}
this.maxRetryTimeout = maxRetryTimeoutMillis;
return this;
}
/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
*/
public RestClientBuilder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
this.httpClientConfigCallback = httpClientConfigCallback;
return this;
}
/**
* Sets the {@link RequestConfigCallback} to be used to customize http client configuration
*/
public RestClientBuilder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
this.requestConfigCallback = requestConfigCallback;
return this;
}
/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (failureListener == null) {
failureListener = new RestClient.FailureListener();
}
CloseableHttpAsyncClient httpClient = createHttpClient();
RestClient restClient = new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
httpClient.start();
return restClient;
}
private CloseableHttpAsyncClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);
if (requestConfigCallback != null) {
requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build())
//default settings for connection pooling may be too constraining
.setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL);
if (httpClientConfigCallback != null) {
httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
return httpClientBuilder.build();
}
/**
* Callback used to customize the default {@link RequestConfig} being set to the {@link CloseableHttpClient}
* @see HttpClientBuilder#setDefaultRequestConfig
*/
public interface RequestConfigCallback {
/**
* Allows customizing the {@link RequestConfig} that will be used with each request.
* It is common to customize the different timeout values through this method without losing any other useful default
* value that the {@link RestClientBuilder} internally sets.
*/
RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder);
}
/**
* Callback used to customize the {@link CloseableHttpClient} instance used by a {@link RestClient} instance.
* Allows customizing the default {@link RequestConfig} being set to the client and any parameter that
* can be set through {@link HttpClientBuilder}
*/
public interface HttpClientConfigCallback {
/**
* Allows customizing the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}.
* Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication
* or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default
* value that the {@link RestClientBuilder} internally sets, like connection pooling.
*/
HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder);
}
}
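A sketch of wiring the two callbacks through the builder (host and timeout values are illustrative): the request config callback receives the builder pre-populated with the defaults above, so only the values of interest need to change.

import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

public class BuilderExample {
    public static void main(String[] args) throws Exception {
        RestClient client = RestClient.builder(new HttpHost("localhost", 9200))
                .setMaxRetryTimeoutMillis(30000)
                .setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                    @Override
                    public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                        //raise the socket timeout without losing the builder's other defaults
                        return requestConfigBuilder.setSocketTimeout(60000);
                    }
                })
                .build();
        client.close();
    }
}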

View File

@ -0,0 +1,107 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import java.util.concurrent.atomic.AtomicReference;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertSame;
public class FailureTrackingResponseListenerTests extends RestClientTestCase {
public void testOnSuccess() {
MockResponseListener responseListener = new MockResponseListener();
RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);
Response response = mockResponse();
listener.onSuccess(response);
assertSame(response, responseListener.response.get());
assertNull(responseListener.exception.get());
}
public void testOnFailure() {
MockResponseListener responseListener = new MockResponseListener();
RestClient.FailureTrackingResponseListener listener = new RestClient.FailureTrackingResponseListener(responseListener);
int numIters = randomIntBetween(1, 10);
Exception[] expectedExceptions = new Exception[numIters];
for (int i = 0; i < numIters; i++) {
RuntimeException runtimeException = new RuntimeException("test" + i);
expectedExceptions[i] = runtimeException;
listener.trackFailure(runtimeException);
assertNull(responseListener.response.get());
assertNull(responseListener.exception.get());
}
if (randomBoolean()) {
Response response = mockResponse();
listener.onSuccess(response);
assertSame(response, responseListener.response.get());
assertNull(responseListener.exception.get());
} else {
RuntimeException runtimeException = new RuntimeException("definitive");
listener.onDefinitiveFailure(runtimeException);
assertNull(responseListener.response.get());
Throwable exception = responseListener.exception.get();
assertSame(runtimeException, exception);
int i = numIters - 1;
do {
assertNotNull(exception.getSuppressed());
assertEquals(1, exception.getSuppressed().length);
assertSame(expectedExceptions[i--], exception.getSuppressed()[0]);
exception = exception.getSuppressed()[0];
} while(i >= 0);
}
}
private static class MockResponseListener implements ResponseListener {
private final AtomicReference<Response> response = new AtomicReference<>();
private final AtomicReference<Exception> exception = new AtomicReference<>();
@Override
public void onSuccess(Response response) {
if (this.response.compareAndSet(null, response) == false) {
throw new IllegalStateException("onSuccess was called multiple times");
}
}
@Override
public void onFailure(Exception exception) {
if (this.exception.compareAndSet(null, exception) == false) {
throw new IllegalStateException("onFailure was called multiple times");
}
}
}
private static Response mockResponse() {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}
}

View File

@ -0,0 +1,114 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.entity.BasicHttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.protocol.HttpContext;
import static org.elasticsearch.client.HeapBufferedAsyncResponseConsumer.DEFAULT_BUFFER_LIMIT;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase {
//maximum buffer that this test ends up allocating is 50MB
private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024;
public void testResponseProcessing() throws Exception {
ContentDecoder contentDecoder = mock(ContentDecoder.class);
IOControl ioControl = mock(IOControl.class);
HttpContext httpContext = mock(HttpContext.class);
HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer());
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
httpResponse.setEntity(new StringEntity("test"));
//everything goes well
consumer.responseReceived(httpResponse);
consumer.consumeContent(contentDecoder, ioControl);
consumer.responseCompleted(httpContext);
verify(consumer).releaseResources();
verify(consumer).buildResult(httpContext);
assertTrue(consumer.isDone());
assertSame(httpResponse, consumer.getResult());
consumer.responseCompleted(httpContext);
verify(consumer, times(1)).releaseResources();
verify(consumer, times(1)).buildResult(httpContext);
}
public void testDefaultBufferLimit() throws Exception {
HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer();
bufferLimitTest(consumer, DEFAULT_BUFFER_LIMIT);
}
public void testConfiguredBufferLimit() throws Exception {
try {
new HeapBufferedAsyncResponseConsumer(randomIntBetween(Integer.MIN_VALUE, 0));
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("bufferLimit must be greater than 0", e.getMessage());
}
try {
new HeapBufferedAsyncResponseConsumer(0);
fail("should have failed");
} catch(IllegalArgumentException e) {
assertEquals("bufferLimit must be greater than 0", e.getMessage());
}
int bufferLimit = randomIntBetween(1, MAX_TEST_BUFFER_SIZE - 100);
HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(bufferLimit);
bufferLimitTest(consumer, bufferLimit);
}
private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
consumer.onResponseReceived(new BasicHttpResponse(statusLine));
BasicHttpEntity entity = new BasicHttpEntity();
entity.setContentLength(randomInt(bufferLimit));
consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
entity.setContentLength(randomIntBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE));
try {
consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON);
fail("should have thrown ContentTooLongException");
} catch(ContentTooLongException e) {
assertEquals("entity content is too long [" + entity.getContentLength() +
"] for the configured buffer limit [" + bufferLimit + "]", e.getMessage());
}
}
}
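The two limit tests pin down the consumer's contract: a non-positive limit is rejected at construction time, and an entity whose declared length exceeds the limit fails fast with ContentTooLongException. Here is a rough sketch of that check, grounded only in the error messages asserted above; the class and method names are made up.
import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
//illustrative sketch of the buffer-limit contract, not the consumer's code
final class BufferLimitCheck {
    private final int bufferLimit;
    BufferLimitCheck(int bufferLimit) {
        if (bufferLimit <= 0) {
            throw new IllegalArgumentException("bufferLimit must be greater than 0");
        }
        this.bufferLimit = bufferLimit;
    }
    //reject up front any entity whose declared Content-Length exceeds the limit
    void checkEntity(HttpEntity entity) throws ContentTooLongException {
        long len = entity.getContentLength();
        if (len > bufferLimit) {
            throw new ContentTooLongException("entity content is too long [" + len +
                    "] for the configured buffer limit [" + bufferLimit + "]");
        }
    }
}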

View File

@ -21,7 +21,6 @@ package org.elasticsearch.client;
import org.apache.http.HttpHost;
import java.io.IOException;
import java.util.HashSet;
import java.util.Set;
@ -30,13 +29,13 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
/**
* {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called
* {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows tracking when it gets called and for which host.
*/
class TrackingFailureListener extends RestClient.FailureListener {
private Set<HttpHost> hosts = new HashSet<>();
class HostsTrackingFailureListener extends RestClient.FailureListener {
private volatile Set<HttpHost> hosts = new HashSet<>();
@Override
public void onFailure(HttpHost host) throws IOException {
public void onFailure(HttpHost host) {
hosts.add(host);
}

View File

@ -35,6 +35,8 @@ import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import java.io.ByteArrayInputStream;
@ -50,7 +52,14 @@ public class RequestLoggerTests extends RestClientTestCase {
public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
URI uri = new URI("/index/type/_api");
String expectedEndpoint = "/index/type/_api";
URI uri;
if (randomBoolean()) {
uri = new URI(expectedEndpoint);
} else {
uri = new URI("index/type/_api");
}
HttpRequestBase request;
int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);
@ -83,21 +92,31 @@ public class RequestLoggerTests extends RestClientTestCase {
throw new UnsupportedOperationException();
}
String expected = "curl -iX " + request.getMethod() + " '" + host + uri + "'";
String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
String requestBody = "{ \"field\": \"value\" }";
if (hasBody) {
expected += " -d '" + requestBody + "'";
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
HttpEntity entity;
if (getRandom().nextBoolean()) {
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
} else {
entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
switch(RandomInts.randomIntBetween(getRandom(), 0, 3)) {
case 0:
entity = new StringEntity(requestBody, StandardCharsets.UTF_8);
break;
case 1:
entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)));
break;
case 2:
entity = new NStringEntity(requestBody, StandardCharsets.UTF_8);
break;
case 3:
entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8));
break;
default:
throw new UnsupportedOperationException();
}
enclosingRequest.setEntity(entity);
}
String traceRequest = RequestLogger.buildTraceRequest(request, host);
assertThat(traceRequest, equalTo(expected));
if (hasBody) {

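For reference, the trace line the test expects is plain curl syntax: "curl -iX METHOD 'host+endpoint'", plus "-d 'body'" when an entity is present. The following hypothetical helper reproduces the same string; buildCurlLine is not the client's API, and the example relies on HttpHost.toString() rendering as scheme://host:port.
import org.apache.http.HttpHost;
//hypothetical helper matching the expected trace line asserted above
final class CurlTrace {
    static String buildCurlLine(String method, HttpHost host, String endpoint, String body) {
        String line = "curl -iX " + method + " '" + host + endpoint + "'";
        if (body != null) {
            line += " -d '" + body + "'";
        }
        return line;
    }
}
//e.g. buildCurlLine("GET", new HttpHost("localhost", 9200), "/index/type/_api", null)
//returns: curl -iX GET 'http://localhost:9200/index/type/_api'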
View File

@ -22,7 +22,8 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;
import java.io.IOException;
@ -49,12 +50,16 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
try {
RestClient.builder(new HttpHost[]{new HttpHost("localhost", 9200), null}).build();
RestClient.builder(new HttpHost("localhost", 9200), null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("host cannot be null", e.getMessage());
}
try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
assertNotNull(restClient);
}
try {
RestClient.builder(new HttpHost("localhost", 9200))
.setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(getRandom(), Integer.MIN_VALUE, 0));
@ -67,7 +72,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("default headers must not be null", e.getMessage());
assertEquals("defaultHeaders must not be null", e.getMessage());
}
try {
@ -81,7 +86,21 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("failure listener must not be null", e.getMessage());
assertEquals("failureListener must not be null", e.getMessage());
}
try {
RestClient.builder(new HttpHost("localhost", 9200)).setHttpClientConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("httpClientConfigCallback must not be null", e.getMessage());
}
try {
RestClient.builder(new HttpHost("localhost", 9200)).setRequestConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("requestConfigCallback must not be null", e.getMessage());
}
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
@ -89,9 +108,22 @@ public class RestClientBuilderTests extends RestClientTestCase {
for (int i = 0; i < numNodes; i++) {
hosts[i] = new HttpHost("localhost", 9200 + i);
}
RestClient.Builder builder = RestClient.builder(hosts);
RestClientBuilder builder = RestClient.builder(hosts);
if (getRandom().nextBoolean()) {
builder.setHttpClient(HttpClientBuilder.create().build());
builder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
@Override
public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
return httpClientBuilder;
}
});
}
if (getRandom().nextBoolean()) {
builder.setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
@Override
public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
return requestConfigBuilder;
}
});
}
if (getRandom().nextBoolean()) {
int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);

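Put together, the builder surface exercised above composes as in the following minimal usage sketch with both callbacks supplied; the socket timeout and connection count are made-up values for illustration.
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
//a minimal sketch of the callback-based configuration
final class BuilderUsage {
    static RestClient build() {
        return RestClient.builder(new HttpHost("localhost", 9200), new HttpHost("localhost", 9201))
                .setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                    @Override
                    public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                        //illustrative value
                        return requestConfigBuilder.setSocketTimeout(10000);
                    }
                })
                .setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                    @Override
                    public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
                        //illustrative value
                        return httpClientBuilder.setMaxConnTotal(10);
                    }
                })
                .build();
    }
}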
View File

@ -27,6 +27,7 @@ import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
@ -47,6 +48,9 @@ import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
@ -140,10 +144,10 @@ public class RestClientIntegTests extends RestClientTestCase {
* to set/add headers to the {@link org.apache.http.client.HttpClient}.
* Exercises the test http server's ability to send back whatever headers it received.
*/
public void testHeaders() throws Exception {
public void testHeaders() throws IOException {
for (String method : getHttpMethods()) {
Set<String> standardHeaders = new HashSet<>(
Arrays.asList("Accept-encoding", "Connection", "Host", "User-agent", "Date"));
Arrays.asList("Connection", "Host", "User-agent", "Date"));
if (method.equals("HEAD") == false) {
standardHeaders.add("Content-length");
}
@ -162,9 +166,9 @@ public class RestClientIntegTests extends RestClientTestCase {
int statusCode = randomStatusCode(getRandom());
Response esResponse;
try (Response response = restClient.performRequest(method, "/" + statusCode,
Collections.<String, String>emptyMap(), null, headers)) {
esResponse = response;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(),
(HttpEntity)null, headers);
} catch(ResponseException e) {
esResponse = e.getResponse();
}
@ -188,7 +192,7 @@ public class RestClientIntegTests extends RestClientTestCase {
* out of the box by {@link org.apache.http.client.HttpClient}.
* Exercises the test http server's ability to send back whatever body it received.
*/
public void testDeleteWithBody() throws Exception {
public void testDeleteWithBody() throws IOException {
bodyTest("DELETE");
}
@ -197,25 +201,74 @@ public class RestClientIntegTests extends RestClientTestCase {
* out of the box by {@link org.apache.http.client.HttpClient}.
* Exercises the test http server's ability to send back whatever body it received.
*/
public void testGetWithBody() throws Exception {
public void testGetWithBody() throws IOException {
bodyTest("GET");
}
private void bodyTest(String method) throws Exception {
private void bodyTest(String method) throws IOException {
String requestBody = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(requestBody);
Response esResponse;
String responseBody;
int statusCode = randomStatusCode(getRandom());
try (Response response = restClient.performRequest(method, "/" + statusCode,
Collections.<String, String>emptyMap(), entity)) {
responseBody = EntityUtils.toString(response.getEntity());
esResponse = response;
Response esResponse;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), entity);
} catch(ResponseException e) {
responseBody = e.getResponseBody();
esResponse = e.getResponse();
}
assertEquals(statusCode, esResponse.getStatusLine().getStatusCode());
assertEquals(requestBody, responseBody);
assertEquals(requestBody, EntityUtils.toString(esResponse.getEntity()));
}
public void testAsyncRequests() throws Exception {
int numRequests = randomIntBetween(5, 20);
final CountDownLatch latch = new CountDownLatch(numRequests);
final List<TestResponse> responses = new CopyOnWriteArrayList<>();
for (int i = 0; i < numRequests; i++) {
final String method = RestClientTestUtil.randomHttpMethod(getRandom());
final int statusCode = randomStatusCode(getRandom());
restClient.performRequest(method, "/" + statusCode, new ResponseListener() {
@Override
public void onSuccess(Response response) {
responses.add(new TestResponse(method, statusCode, response));
latch.countDown();
}
@Override
public void onFailure(Exception exception) {
responses.add(new TestResponse(method, statusCode, exception));
latch.countDown();
}
});
}
assertTrue(latch.await(5, TimeUnit.SECONDS));
assertEquals(numRequests, responses.size());
for (TestResponse response : responses) {
assertEquals(response.method, response.getResponse().getRequestLine().getMethod());
assertEquals(response.statusCode, response.getResponse().getStatusLine().getStatusCode());
}
}
private static class TestResponse {
private final String method;
private final int statusCode;
private final Object response;
TestResponse(String method, int statusCode, Object response) {
this.method = method;
this.statusCode = statusCode;
this.response = response;
}
Response getResponse() {
if (response instanceof Response) {
return (Response) response;
}
if (response instanceof ResponseException) {
return ((ResponseException) response).getResponse();
}
throw new AssertionError("unexpected response " + response.getClass());
}
}
}
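testAsyncRequests above is the canonical usage of the listener-based overload. Condensed to a single request, the pattern looks like the sketch below, assuming a restClient already built as in the test; the endpoint and timeout are illustrative.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
//fire one request through the listener-based overload and wait for either outcome
final class AsyncUsage {
    static boolean fireAndWait(RestClient restClient) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        restClient.performRequest("GET", "/200", new ResponseListener() {
            @Override
            public void onSuccess(Response response) {
                latch.countDown();
            }
            @Override
            public void onFailure(Exception exception) {
                latch.countDown();
            }
        });
        return latch.await(5, TimeUnit.SECONDS);
    }
}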

View File

@ -19,17 +19,20 @@
package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.junit.Before;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@ -39,6 +42,7 @@ import java.net.SocketTimeoutException;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.Future;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode;
import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode;
@ -62,58 +66,61 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
private RestClient restClient;
private HttpHost[] httpHosts;
private TrackingFailureListener failureListener;
private HostsTrackingFailureListener failureListener;
@Before
@SuppressWarnings("unchecked")
public void createRestClient() throws IOException {
CloseableHttpClient httpClient = mock(CloseableHttpClient.class);
when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() {
CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class);
when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
@Override
public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpHost httpHost = (HttpHost) invocationOnMock.getArguments()[0];
HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1];
public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
HttpHost httpHost = requestProducer.getTarget();
FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2];
//return the desired status code or exception depending on the path
if (request.getURI().getPath().equals("/soe")) {
throw new SocketTimeoutException(httpHost.toString());
futureCallback.failed(new SocketTimeoutException(httpHost.toString()));
} else if (request.getURI().getPath().equals("/coe")) {
throw new ConnectTimeoutException(httpHost.toString());
futureCallback.failed(new ConnectTimeoutException(httpHost.toString()));
} else if (request.getURI().getPath().equals("/ioe")) {
throw new IOException(httpHost.toString());
futureCallback.failed(new IOException(httpHost.toString()));
} else {
int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
futureCallback.completed(new BasicHttpResponse(statusLine));
}
int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
return new CloseableBasicHttpResponse(statusLine);
return null;
}
});
int numHosts = RandomInts.randomIntBetween(getRandom(), 2, 5);
httpHosts = new HttpHost[numHosts];
for (int i = 0; i < numHosts; i++) {
httpHosts[i] = new HttpHost("localhost", 9200 + i);
}
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build();
failureListener = new HostsTrackingFailureListener();
restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
}
public void testRoundRobinOkStatusCodes() throws Exception {
public void testRoundRobinOkStatusCodes() throws IOException {
int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>();
Collections.addAll(hostsSet, httpHosts);
for (int j = 0; j < httpHosts.length; j++) {
int statusCode = randomOkStatusCode(getRandom());
try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
}
Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
assertEquals(statusCode, response.getStatusLine().getStatusCode());
assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
}
assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size());
}
failureListener.assertNotCalled();
}
public void testRoundRobinNoRetryErrors() throws Exception {
public void testRoundRobinNoRetryErrors() throws IOException {
int numIters = RandomInts.randomIntBetween(getRandom(), 1, 5);
for (int i = 0; i < numIters; i++) {
Set<HttpHost> hostsSet = new HashSet<>();
@ -121,12 +128,12 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int j = 0; j < httpHosts.length; j++) {
String method = randomHttpMethod(getRandom());
int statusCode = randomErrorNoRetryStatusCode(getRandom());
try (Response response = restClient.performRequest(method, "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
try {
Response response = restClient.performRequest(method, "/" + statusCode);
if (method.equals("HEAD") && statusCode == 404) {
//no exception gets thrown although we got a 404
assertThat(response.getStatusLine().getStatusCode(), equalTo(404));
assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
assertEquals(404, response.getStatusLine().getStatusCode());
assertEquals(statusCode, response.getStatusLine().getStatusCode());
assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
} else {
fail("request should have failed");
@ -136,7 +143,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
throw e;
}
Response response = e.getResponse();
assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
assertEquals(statusCode, response.getStatusLine().getStatusCode());
assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
assertEquals(0, e.getSuppressed().length);
}
@ -146,10 +153,10 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
failureListener.assertNotCalled();
}
public void testRoundRobinRetryErrors() throws Exception {
public void testRoundRobinRetryErrors() throws IOException {
String retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Set<HttpHost> hostsSet = new HashSet<>();
@ -158,7 +165,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
failureListener.assertCalled(httpHosts);
do {
Response response = e.getResponse();
assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1))));
assertEquals(Integer.parseInt(retryEndpoint.substring(1)), response.getStatusLine().getStatusCode());
assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times",
hostsSet.remove(response.getHost()));
if (e.getSuppressed().length > 0) {
@ -199,7 +206,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int j = 0; j < httpHosts.length; j++) {
retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Response response = e.getResponse();
@ -225,9 +232,8 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int y = 0; y < iters; y++) {
int statusCode = randomErrorNoRetryStatusCode(getRandom());
Response response;
try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
response = esResponse;
try {
response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode);
}
catch(ResponseException e) {
response = e.getResponse();
@ -245,8 +251,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int y = 0; y < i + 1; y++) {
retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint,
Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Response response = e.getResponse();

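The test wires its HostsTrackingFailureListener straight into the RestClient constructor because it mocks the http client; against the public API the same hook goes through the builder. A hedged sketch follows; the printed message is illustrative.
import org.apache.http.HttpHost;
//wiring a failure listener through the public builder
final class FailureListenerUsage {
    static RestClient build(HttpHost... hosts) {
        return RestClient.builder(hosts)
                .setFailureListener(new RestClient.FailureListener() {
                    @Override
                    public void onFailure(HttpHost host) {
                        //called whenever a host fails and the client moves on to the next one
                        System.err.println("request to [" + host + "] failed, host will be retried later");
                    }
                })
                .build();
    }
}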
View File

@ -19,7 +19,6 @@
package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
@ -27,9 +26,9 @@ import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
@ -38,11 +37,15 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.apache.http.util.EntityUtils;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
@ -52,11 +55,11 @@ import org.mockito.stubbing.Answer;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Future;
import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes;
import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
@ -87,39 +90,48 @@ public class RestClientSingleHostTests extends RestClientTestCase {
private RestClient restClient;
private Header[] defaultHeaders;
private HttpHost httpHost;
private CloseableHttpClient httpClient;
private TrackingFailureListener failureListener;
private CloseableHttpAsyncClient httpClient;
private HostsTrackingFailureListener failureListener;
@Before
@SuppressWarnings("unchecked")
public void createRestClient() throws IOException {
httpClient = mock(CloseableHttpClient.class);
when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer<CloseableHttpResponse>() {
@Override
public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1];
//return the desired status code or exception depending on the path
if (request.getURI().getPath().equals("/soe")) {
throw new SocketTimeoutException();
} else if (request.getURI().getPath().equals("/coe")) {
throw new ConnectTimeoutException();
}
int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
httpClient = mock(CloseableHttpAsyncClient.class);
when(httpClient.<HttpResponse>execute(any(HttpAsyncRequestProducer.class), any(HttpAsyncResponseConsumer.class),
any(FutureCallback.class))).thenAnswer(new Answer<Future<HttpResponse>>() {
@Override
public Future<HttpResponse> answer(InvocationOnMock invocationOnMock) throws Throwable {
HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0];
FutureCallback<HttpResponse> futureCallback = (FutureCallback<HttpResponse>) invocationOnMock.getArguments()[2];
HttpUriRequest request = (HttpUriRequest)requestProducer.generateRequest();
//return the desired status code or exception depending on the path
if (request.getURI().getPath().equals("/soe")) {
futureCallback.failed(new SocketTimeoutException());
} else if (request.getURI().getPath().equals("/coe")) {
futureCallback.failed(new ConnectTimeoutException());
} else {
int statusCode = Integer.parseInt(request.getURI().getPath().substring(1));
StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, "");
CloseableHttpResponse httpResponse = new CloseableBasicHttpResponse(statusLine);
//return the same body that was sent
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
if (entity != null) {
assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable());
httpResponse.setEntity(entity);
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
//return the same body that was sent
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity();
if (entity != null) {
assertTrue("the entity is not repeatable, cannot set it to the response directly",
entity.isRepeatable());
httpResponse.setEntity(entity);
}
}
//return the same headers that were sent
httpResponse.setHeaders(request.getAllHeaders());
futureCallback.completed(httpResponse);
}
return null;
}
}
//return the same headers that were sent
httpResponse.setHeaders(request.getAllHeaders());
return httpResponse;
}
});
});
int numHeaders = RandomInts.randomIntBetween(getRandom(), 0, 3);
defaultHeaders = new Header[numHeaders];
for (int i = 0; i < numHeaders; i++) {
@ -128,21 +140,22 @@ public class RestClientSingleHostTests extends RestClientTestCase {
defaultHeaders[i] = new BasicHeader(headerName, headerValue);
}
httpHost = new HttpHost("localhost", 9200);
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders)
.setFailureListener(failureListener).build();
failureListener = new HostsTrackingFailureListener();
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
}
/**
* Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client
*/
@SuppressWarnings("unchecked")
public void testInternalHttpRequest() throws Exception {
ArgumentCaptor<HttpUriRequest> requestArgumentCaptor = ArgumentCaptor.forClass(HttpUriRequest.class);
ArgumentCaptor<HttpAsyncRequestProducer> requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class);
int times = 0;
for (String httpMethod : getHttpMethods()) {
HttpUriRequest expectedRequest = performRandomRequest(httpMethod);
verify(httpClient, times(++times)).execute(any(HttpHost.class), requestArgumentCaptor.capture());
HttpUriRequest actualRequest = requestArgumentCaptor.getValue();
verify(httpClient, times(++times)).<HttpResponse>execute(requestArgumentCaptor.capture(),
any(HttpAsyncResponseConsumer.class), any(FutureCallback.class));
HttpUriRequest actualRequest = (HttpUriRequest)requestArgumentCaptor.getValue().generateRequest();
assertEquals(expectedRequest.getURI(), actualRequest.getURI());
assertEquals(expectedRequest.getClass(), actualRequest.getClass());
assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders());
@ -156,7 +169,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}
public void testSetNodes() throws IOException {
public void testSetHosts() throws IOException {
try {
restClient.setHosts((HttpHost[]) null);
fail("setHosts should have failed");
@ -186,11 +199,10 @@ public class RestClientSingleHostTests extends RestClientTestCase {
/**
* End to end test for ok status codes
*/
public void testOkStatusCodes() throws Exception {
public void testOkStatusCodes() throws IOException {
for (String method : getHttpMethods()) {
for (int okStatusCode : getOkStatusCodes()) {
Response response = restClient.performRequest(method, "/" + okStatusCode,
Collections.<String, String>emptyMap(), null);
Response response = performRequest(method, "/" + okStatusCode);
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
}
}
@ -200,12 +212,12 @@ public class RestClientSingleHostTests extends RestClientTestCase {
/**
* End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests
*/
public void testErrorStatusCodes() throws Exception {
public void testErrorStatusCodes() throws IOException {
for (String method : getHttpMethods()) {
//error status codes should cause an exception to be thrown
for (int errorStatusCode : getAllErrorStatusCodes()) {
try (Response response = restClient.performRequest(method, "/" + errorStatusCode,
Collections.<String, String>emptyMap(), null)) {
try {
Response response = performRequest(method, "/" + errorStatusCode);
if (method.equals("HEAD") && errorStatusCode == 404) {
//no exception gets thrown although we got a 404
assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
@ -231,14 +243,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
for (String method : getHttpMethods()) {
//IOExceptions should be let bubble up
try {
restClient.performRequest(method, "/coe", Collections.<String, String>emptyMap(), null);
performRequest(method, "/coe");
fail("request should have failed");
} catch(IOException e) {
assertThat(e, instanceOf(ConnectTimeoutException.class));
}
failureListener.assertCalled(httpHost);
try {
restClient.performRequest(method, "/soe", Collections.<String, String>emptyMap(), null);
performRequest(method, "/soe");
fail("request should have failed");
} catch(IOException e) {
assertThat(e, instanceOf(SocketTimeoutException.class));
@ -251,16 +263,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
* End to end test for request and response body. Exercises the mock http client's ability to send back
* whatever body it has received.
*/
public void testBody() throws Exception {
public void testBody() throws IOException {
String body = "{ \"field\": \"value\" }";
StringEntity entity = new StringEntity(body);
for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) {
for (int okStatusCode : getOkStatusCodes()) {
try (Response response = restClient.performRequest(method, "/" + okStatusCode,
Collections.<String, String>emptyMap(), entity)) {
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
}
Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.<String, String>emptyMap(), entity);
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body));
}
for (int errorStatusCode : getAllErrorStatusCodes()) {
try {
@ -275,8 +285,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
try {
restClient.performRequest(method, "/" + randomStatusCode(getRandom()),
Collections.<String, String>emptyMap(), entity);
restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.<String, String>emptyMap(), entity);
fail("request should have failed");
} catch(UnsupportedOperationException e) {
assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
@ -284,28 +293,34 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}
public void testNullHeaders() throws Exception {
public void testNullHeaders() throws IOException {
String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom());
try {
restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header[])null);
performRequest(method, "/" + statusCode, (Header[])null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("request headers must not be null", e.getMessage());
}
try {
restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header)null);
performRequest(method, "/" + statusCode, (Header)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("request header must not be null", e.getMessage());
}
}
public void testNullParams() throws Exception {
public void testNullParams() throws IOException {
String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom());
try {
restClient.performRequest(method, "/" + statusCode, null, null);
restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage());
}
try {
restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage());
@ -316,7 +331,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
* End to end test for request and response headers. Exercises the mock http client's ability to send back
* whatever headers it has received.
*/
public void testHeaders() throws Exception {
public void testHeaders() throws IOException {
for (String method : getHttpMethods()) {
Map<String, String> expectedHeaders = new HashMap<>();
for (Header defaultHeader : defaultHeaders) {
@ -333,9 +348,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
int statusCode = randomStatusCode(getRandom());
Response esResponse;
try (Response response = restClient.performRequest(method, "/" + statusCode,
Collections.<String, String>emptyMap(), null, headers)) {
esResponse = response;
try {
esResponse = restClient.performRequest(method, "/" + statusCode, headers);
} catch(ResponseException e) {
esResponse = e.getResponse();
}
@ -348,11 +362,12 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}
private HttpUriRequest performRandomRequest(String method) throws IOException, URISyntaxException {
private HttpUriRequest performRandomRequest(String method) throws Exception {
String uriAsString = "/" + randomStatusCode(getRandom());
URIBuilder uriBuilder = new URIBuilder(uriAsString);
Map<String, String> params = Collections.emptyMap();
if (getRandom().nextBoolean()) {
boolean hasParams = randomBoolean();
if (hasParams) {
int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
params = new HashMap<>(numParams);
for (int i = 0; i < numParams; i++) {
@ -395,7 +410,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
HttpEntity entity = null;
if (request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) {
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
if (hasBody) {
entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
((HttpEntityEnclosingRequest) request).setEntity(entity);
}
@ -418,10 +434,29 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
try {
restClient.performRequest(method, uriAsString, params, entity, headers);
if (hasParams == false && hasBody == false && randomBoolean()) {
restClient.performRequest(method, uriAsString, headers);
} else if (hasBody == false && randomBoolean()) {
restClient.performRequest(method, uriAsString, params, headers);
} else {
restClient.performRequest(method, uriAsString, params, entity, headers);
}
} catch(ResponseException e) {
//all good
}
return request;
}
private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
switch(randomIntBetween(0, 2)) {
case 0:
return restClient.performRequest(method, endpoint, headers);
case 1:
return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
case 2:
return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), (HttpEntity)null, headers);
default:
throw new UnsupportedOperationException();
}
}
}
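A pattern that recurs throughout these tests is worth isolating: with the new synchronous signatures, error status codes surface as ResponseException, which still carries the underlying Response. The small helper below is hypothetical, not part of the client.
import java.io.IOException;
//statusOf is a hypothetical helper showing the ResponseException pattern
final class StatusHelper {
    static int statusOf(RestClient restClient, String method, String endpoint) throws IOException {
        try {
            Response response = restClient.performRequest(method, endpoint);
            return response.getStatusLine().getStatusCode();
        } catch (ResponseException e) {
            //error responses are still fully accessible through the exception
            return e.getResponse().getStatusLine().getStatusCode();
        }
    }
}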

View File

@ -0,0 +1,172 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import java.io.IOException;
import java.net.URISyntaxException;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.fail;
public class SyncResponseListenerTests extends RestClientTestCase {
public void testOnSuccessNullResponse() {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
try {
syncResponseListener.onSuccess(null);
fail("onSuccess should have failed");
} catch(NullPointerException e) {
assertEquals("response must not be null", e.getMessage());
}
}
public void testOnFailureNullException() {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
try {
syncResponseListener.onFailure(null);
fail("onFailure should have failed");
} catch(NullPointerException e) {
assertEquals("exception must not be null", e.getMessage());
}
}
public void testOnSuccess() throws Exception {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
Response mockResponse = mockResponse();
syncResponseListener.onSuccess(mockResponse);
Response response = syncResponseListener.get();
assertSame(response, mockResponse);
try {
syncResponseListener.onSuccess(mockResponse);
fail("onSuccess should have failed");
} catch(IllegalStateException e) {
assertEquals("response is already set", e.getMessage());
}
response = syncResponseListener.get();
assertSame(response, mockResponse);
RuntimeException runtimeException = new RuntimeException("test");
syncResponseListener.onFailure(runtimeException);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(IllegalStateException e) {
assertEquals("response and exception are unexpectedly set at the same time", e.getMessage());
assertNotNull(e.getSuppressed());
assertEquals(1, e.getSuppressed().length);
assertSame(runtimeException, e.getSuppressed()[0]);
}
}
public void testOnFailure() throws Exception {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
RuntimeException firstException = new RuntimeException("first-test");
syncResponseListener.onFailure(firstException);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(RuntimeException e) {
assertSame(firstException, e);
}
RuntimeException secondException = new RuntimeException("second-test");
try {
syncResponseListener.onFailure(secondException);
fail("onFailure should have failed");
} catch(IllegalStateException e) {
assertEquals("exception is already set", e.getMessage());
}
try {
syncResponseListener.get();
fail("get should have failed");
} catch(RuntimeException e) {
assertSame(firstException, e);
}
Response response = mockResponse();
syncResponseListener.onSuccess(response);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(IllegalStateException e) {
assertEquals("response and exception are unexpectedly set at the same time", e.getMessage());
assertNotNull(e.getSuppressed());
assertEquals(1, e.getSuppressed().length);
assertSame(firstException, e.getSuppressed()[0]);
}
}
public void testRuntimeExceptionIsNotWrapped() throws Exception {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
RuntimeException runtimeException = new RuntimeException();
syncResponseListener.onFailure(runtimeException);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(RuntimeException e) {
assertSame(runtimeException, e);
}
}
public void testIOExceptionIsNotWrapped() throws Exception {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
IOException ioException = new IOException();
syncResponseListener.onFailure(ioException);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(IOException e) {
assertSame(ioException, e);
}
}
public void testExceptionIsWrapped() throws Exception {
RestClient.SyncResponseListener syncResponseListener = new RestClient.SyncResponseListener(10000);
//we just need any checked exception
URISyntaxException exception = new URISyntaxException("test", "test");
syncResponseListener.onFailure(exception);
try {
syncResponseListener.get();
fail("get should have failed");
} catch(RuntimeException e) {
assertEquals("error while performing request", e.getMessage());
assertSame(exception, e.getCause());
}
}
private static Response mockResponse() {
ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1);
RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion);
StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK");
HttpResponse httpResponse = new BasicHttpResponse(statusLine);
return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse);
}
}
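The three wrapping tests encode a simple rule set for how the sync listener rethrows on failure: runtime and I/O exceptions propagate untouched, while any other checked exception is wrapped. The sketch below is an illustrative restatement of those rules, not the listener's actual code.
import java.io.IOException;
//illustrative restatement of the propagation rules the tests verify
final class Propagation {
    static void rethrow(Exception exception) throws IOException {
        if (exception instanceof RuntimeException) {
            //runtime exceptions are rethrown as-is
            throw (RuntimeException) exception;
        }
        if (exception instanceof IOException) {
            //I/O exceptions are rethrown as-is
            throw (IOException) exception;
        }
        //any other checked exception is wrapped
        throw new RuntimeException("error while performing request", exception);
    }
}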

View File

@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
group = 'org.elasticsearch.client'
dependencies {
compile "org.elasticsearch.client:rest:${version}"
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
@ -56,6 +58,11 @@ forbiddenApisTest {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
}
dependencyLicenses {
mapping from: /http.*/, to: 'httpclient'
mapping from: /commons-.*/, to: 'commons'
}
//JarHell is part of es core, which we don't want to pull in
jarHell.enabled=false

View File

@ -1,6 +0,0 @@
Apache Commons Logging
Copyright 2003-2014 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -1 +0,0 @@
b31526a230871fbe285fbcbe2813f9c0839ae9b0

View File

@ -0,0 +1 @@
e7501a1b34325abb00d17dde96150604a0658b54

View File

@ -1,558 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
=========================================================================
This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0
Full license text: <http://mozilla.org/MPL/2.0/>
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -1,6 +0,0 @@
Apache HttpComponents Client
Copyright 1999-2016 The Apache Software Foundation
This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

View File

@ -62,9 +62,8 @@ public class HostsSniffer {
* Calls the Elasticsearch nodes info API, parses the response, and returns all the HTTP hosts found
*/
public List<HttpHost> sniffHosts() throws IOException {
try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) {
return readHosts(response.getEntity());
}
Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams);
return readHosts(response.getEntity());
}
private List<HttpHost> readHosts(HttpEntity entity) throws IOException {
@ -156,7 +155,7 @@ public class HostsSniffer {
private final RestClient restClient;
private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
private Scheme scheme;
private Scheme scheme = Scheme.HTTP;
private Builder(RestClient restClient) {
Objects.requireNonNull(restClient, "restClient cannot be null");
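
For context, a minimal usage sketch of the sniffer after this change, assuming a reachable node on localhost:9200 (host and port are placeholders). Since the builder now defaults the scheme to HTTP, it is left unset here:

import java.util.List;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.HostsSniffer;

public class HostsSnifferExample {
    public static void main(String[] args) throws Exception {
        // host and port are placeholders for a reachable node
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // the scheme defaults to Scheme.HTTP after this change
            HostsSniffer sniffer = HostsSniffer.builder(restClient).build();
            List<HttpHost> hosts = sniffer.sniffHosts(); // calls /_nodes/http and parses the response
            System.out.println(hosts);
        }
    }
}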

View File

@ -22,7 +22,6 @@ package org.elasticsearch.client.sniff;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import java.io.IOException;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicBoolean;
@ -55,7 +54,7 @@ public class SniffOnFailureListener extends RestClient.FailureListener {
}
@Override
public void onFailure(HttpHost host) throws IOException {
public void onFailure(HttpHost host) {
if (sniffer == null) {
throw new IllegalStateException("sniffer was not set, unable to sniff on failure");
}

View File

@ -23,6 +23,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import java.io.Closeable;
import java.io.IOException;
@ -39,7 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
* Must be created via {@link Builder}, which allows setting all of the different options, or relying on defaults.
* A background task fetches the nodes through the {@link HostsSniffer} and sets them on the {@link RestClient} instance.
* It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to
* {@link org.elasticsearch.client.RestClient.Builder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation
* {@link RestClientBuilder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation
* needs to be lazily set on the previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}.
*/
public final class Sniffer implements Closeable {
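
Putting the javadoc above into code, a hedged sketch of the sniff-on-failure wiring (localhost:9200 is a placeholder; error handling is elided):

import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.HostsSniffer;
import org.elasticsearch.client.sniff.SniffOnFailureListener;
import org.elasticsearch.client.sniff.Sniffer;

public class SniffOnFailureExample {
    public static void main(String[] args) throws Exception {
        SniffOnFailureListener listener = new SniffOnFailureListener();
        // the listener must be registered before the client is built
        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setFailureListener(listener)
                .build();
        HostsSniffer hostsSniffer = HostsSniffer.builder(restClient).build();
        Sniffer sniffer = Sniffer.builder(restClient, hostsSniffer).build();
        // the Sniffer is set lazily, once both the client and the sniffer exist
        listener.setSniffer(sniffer);
        // ... use restClient; a failure now triggers a sniff round ...
        sniffer.close();
        restClient.close();
    }
}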

View File

@ -43,7 +43,6 @@ import java.io.OutputStream;
import java.io.StringWriter;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
@ -86,10 +85,14 @@ public class HostsSnifferTests extends RestClientTestCase {
httpServer.stop(0);
}
public void testSniffNodes() throws IOException, URISyntaxException {
public void testSniffNodes() throws IOException {
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) {
HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme);
HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout);
if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) {
builder.setScheme(scheme);
}
HostsSniffer sniffer = builder.build();
try {
List<HttpHost> sniffedHosts = sniffer.sniffHosts();
if (sniffResponse.isFailure) {

View File

@ -45,16 +45,17 @@ public class SniffOnFailureListenerTests extends RestClientTestCase {
assertEquals("sniffer must not be null", e.getMessage());
}
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) {
listener.setSniffer(sniffer);
try {
try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) {
listener.setSniffer(sniffer);
fail("should have failed");
} catch(IllegalStateException e) {
assertEquals("sniffer can only be set once", e.getMessage());
try {
listener.setSniffer(sniffer);
fail("should have failed");
} catch(IllegalStateException e) {
assertEquals("sniffer can only be set once", e.getMessage());
}
listener.onFailure(new HttpHost("localhost", 9200));
}
listener.onFailure(new HttpHost("localhost", 9200));
}
}
}

View File

@ -26,6 +26,9 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7
install.enabled = false
uploadArchives.enabled = false
dependencies {
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "junit:junit:${versions.junit}"

View File

@ -0,0 +1,54 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build'
group = 'org.elasticsearch.client'
dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
compile project(path: ':modules:transport-netty3', configuration: 'runtime')
compile project(path: ':modules:transport-netty4', configuration: 'runtime')
compile project(path: ':modules:reindex', configuration: 'runtime')
compile project(path: ':modules:lang-mustache', configuration: 'runtime')
compile project(path: ':modules:percolator', configuration: 'runtime')
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
}
dependencyLicenses {
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false
}
}
forbiddenApisTest {
// we don't use the core test framework; no Lucene classes are present, so we don't want the es-test-signatures
// to be pulled in
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
}
namingConventions {
testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest'
// we don't have integration tests
skipIntegTestInDisguise = true
}

View File

@ -0,0 +1,91 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.elasticsearch.transport.Netty4Plugin;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
* A builder to create an instance of {@link TransportClient}.
* This class pre-installs the
* {@link Netty3Plugin},
* {@link Netty4Plugin},
* {@link ReindexPlugin},
* {@link PercolatorPlugin},
* and {@link MustachePlugin}
* for the client. These plugins are all Elasticsearch core modules that the client requires.
*/
@SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient {
private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS =
Collections.unmodifiableList(
Arrays.asList(
Netty3Plugin.class,
Netty4Plugin.class,
TransportPlugin.class,
ReindexPlugin.class,
PercolatorPlugin.class,
MustachePlugin.class));
@SafeVarargs
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
this(settings, Arrays.asList(plugins));
}
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
}
public static final class TransportPlugin extends Plugin {
private static final Setting<Boolean> ASSERT_NETTY_BUGLEVEL =
Setting.boolSetting("netty.assert.buglevel", true, Setting.Property.NodeScope);
@Override
public List<Setting<?>> getSettings() {
return Collections.singletonList(ASSERT_NETTY_BUGLEVEL);
}
@Override
public Settings additionalSettings() {
return Settings.builder()
.put(NetworkModule.TRANSPORT_TYPE_KEY, Netty3Plugin.NETTY_TRANSPORT_NAME)
.put(NetworkModule.HTTP_TYPE_KEY, Netty3Plugin.NETTY_HTTP_TRANSPORT_NAME)
.put("netty.assert.buglevel", true)
.build();
}
}
}
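
A hedged usage sketch for the new client; the address is a placeholder, and InetSocketTransportAddress is assumed to be the transport-address implementation at this version:

import java.net.InetAddress;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class TransportClientExample {
    public static void main(String[] args) throws Exception {
        // Settings.EMPTY picks up the defaults contributed by the pre-installed plugins
        try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
            client.addTransportAddress(
                    new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
            // ... issue requests through the client ...
        }
    }
}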

View File

@ -0,0 +1,62 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class PreBuiltTransportClientTests extends RandomizedTest {
@Test
public void testPluginInstalled() {
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
Settings settings = client.settings();
assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
}
}
@Test
public void testInstallPluginTwice() {
for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
MustachePlugin.class)) {
try {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");
} catch (IllegalArgumentException ex) {
assertEquals("plugin is already installed", ex.getMessage());
}
}
}
}

View File

@ -56,10 +56,10 @@ dependencies {
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"
compile 'org.elasticsearch:securesm:1.0'
compile 'org.elasticsearch:securesm:1.1'
// utilities
compile 'net.sf.jopt-simple:jopt-simple:4.9'
compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
compile 'com.carrotsearch:hppc:0.7.1'
// time handling, remove with java 8 time
@ -74,8 +74,6 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
// network stack
compile 'io.netty:netty:3.10.5.Final'
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// percentile ranks aggregation
@ -152,26 +150,11 @@ processResources {
}
thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
// classes are missing!
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
'com.google.protobuf.CodedInputStream',
// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
'com.google.protobuf.CodedOutputStream',
// from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
'com.google.protobuf.ExtensionRegistry',
'com.google.protobuf.MessageLite$Builder',
'com.google.protobuf.MessageLite',
'com.google.protobuf.Parser',
// from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
'javax.jms.Message',
'javax.jms.MessageListener',
@ -196,72 +179,8 @@ thirdPartyAudit.excludes = [
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
// from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
'javax.servlet.ServletConfig',
'javax.servlet.ServletException',
'javax.servlet.ServletOutputStream',
'javax.servlet.http.HttpServlet',
'javax.servlet.http.HttpServletRequest',
'javax.servlet.http.HttpServletResponse',
// from org.jboss.netty.logging.CommonsLoggerFactory (netty)
'org.apache.commons.logging.Log',
'org.apache.commons.logging.LogFactory',
// from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
'org.apache.tomcat.jni.Buffer',
'org.apache.tomcat.jni.Library',
'org.apache.tomcat.jni.Pool',
'org.apache.tomcat.jni.SSL',
'org.apache.tomcat.jni.SSLContext',
// from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
'org.bouncycastle.asn1.x500.X500Name',
'org.bouncycastle.cert.X509v3CertificateBuilder',
'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
'org.bouncycastle.jce.provider.BouncyCastleProvider',
'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',
// from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
'org.eclipse.jetty.npn.NextProtoNego',
// from org.jboss.netty.logging.JBossLoggerFactory (netty)
'org.jboss.logging.Logger',
// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
'org.jboss.marshalling.ByteInput',
// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
'org.jboss.marshalling.ByteOutput',
// from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
'org.jboss.marshalling.Marshaller',
// from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
'org.jboss.marshalling.MarshallerFactory',
'org.jboss.marshalling.MarshallingConfiguration',
'org.jboss.marshalling.Unmarshaller',
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',
// from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',
// from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
'org.osgi.framework.ServiceReference',
'org.osgi.service.log.LogService',
'org.osgi.util.tracker.ServiceTracker',
'org.osgi.util.tracker.ServiceTrackerCustomizer',
// from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
'org.slf4j.LoggerFactory',
]
// dependency license are currently checked in distribution

View File

@ -1110,7 +1110,7 @@ public long ramBytesUsed() {
this.analyzed.copyBytes(analyzed);
}
private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
private static final class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;

View File

@ -27,6 +27,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
@ -71,6 +72,9 @@ public class CustomFieldQuery extends FieldQuery {
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
} else if (sourceQuery instanceof ToParentBlockJoinQuery) {
ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;
flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
} else {
super.flatten(sourceQuery, reader, flatQueries, boost);
}

View File

@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
*/
public class StoreRateLimiting {
public static interface Provider {
public interface Provider {
StoreRateLimiting rateLimiting();
}

View File

@ -30,9 +30,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.TcpTransport;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@ -99,18 +99,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}
public ElasticsearchException(StreamInput in) throws IOException {
super(in.readOptionalString(), in.readThrowable());
super(in.readOptionalString(), in.readException());
readStackTrace(this, in);
int numKeys = in.readVInt();
for (int i = 0; i < numKeys; i++) {
final String key = in.readString();
final int numValues = in.readVInt();
final ArrayList<String> values = new ArrayList<>(numValues);
for (int j = 0; j < numValues; j++) {
values.add(in.readString());
}
headers.put(key, values);
}
headers.putAll(in.readMapOfLists());
}
/**
@ -161,7 +152,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
* Unwraps the actual cause from the exception for cases when the exception is a
* {@link ElasticsearchWrapperException}.
*
* @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable)
* @see ExceptionsHelper#unwrapCause(Throwable)
*/
public Throwable unwrapCause() {
return ExceptionsHelper.unwrapCause(this);
@ -203,16 +194,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(this.getMessage());
out.writeThrowable(this.getCause());
out.writeException(this.getCause());
writeStackTraces(this, out);
out.writeVInt(headers.size());
for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().size());
for (String v : entry.getValue()) {
out.writeString(v);
}
}
out.writeMapOfLists(headers);
}
public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
@ -414,7 +398,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
int numSuppressed = in.readVInt();
for (int i = 0; i < numSuppressed; i++) {
throwable.addSuppressed(in.readThrowable());
throwable.addSuppressed(in.readException());
}
return throwable;
}
@ -434,7 +418,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
Throwable[] suppressed = throwable.getSuppressed();
out.writeVInt(suppressed.length);
for (Throwable t : suppressed) {
out.writeThrowable(t);
out.writeException(t);
}
return throwable;
}
@ -496,7 +480,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.shard.IndexShardStartedException::new, 23),
SEARCH_CONTEXT_MISSING_EXCEPTION(org.elasticsearch.search.SearchContextMissingException.class,
org.elasticsearch.search.SearchContextMissingException::new, 24),
GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
GENERAL_SCRIPT_EXCEPTION(org.elasticsearch.script.GeneralScriptException.class,
org.elasticsearch.script.GeneralScriptException::new, 25),
BATCH_OPERATION_EXCEPTION(org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException.class,
org.elasticsearch.index.shard.TranslogRecoveryPerformer.BatchOperationException::new, 26),
@ -674,10 +658,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
org.elasticsearch.script.Script.ScriptParseException::new, 124),
HTTP_ON_TRANSPORT_EXCEPTION(org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException.class,
org.elasticsearch.transport.netty.SizeHeaderFrameDecoder.HttpOnTransportException::new, 125),
// 124 used to be Script.ScriptParseException
HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
TcpTransport.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
org.elasticsearch.index.mapper.MapperParsingException::new, 126),
SEARCH_CONTEXT_EXCEPTION(org.elasticsearch.search.SearchContextException.class,
@ -708,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
ShardStateAction.NoLongerPrimaryShardException::new, 142),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),
NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144);
final Class<? extends ElasticsearchException> exceptionClass;
@ -793,9 +777,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return null;
}
public static void renderThrowable(XContentBuilder builder, Params params, Throwable t) throws IOException {
public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException {
builder.startObject("error");
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
builder.field("root_cause");
builder.startArray();
for (ElasticsearchException rootCause : rootCauses) {
@ -805,7 +789,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
builder.endObject();
}
builder.endArray();
ElasticsearchException.toXContent(builder, params, t);
ElasticsearchException.toXContent(builder, params, e);
builder.endObject();
}

View File

@ -36,7 +36,7 @@ public class ElasticsearchSecurityException extends ElasticsearchException {
this.status = status ;
}
public ElasticsearchSecurityException(String msg, Throwable cause, Object... args) {
public ElasticsearchSecurityException(String msg, Exception cause, Object... args) {
this(msg, ExceptionsHelper.status(cause), cause, args);
}

View File

@ -37,25 +37,22 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;
/**
*
*/
public final class ExceptionsHelper {
private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
public static RuntimeException convertToRuntime(Throwable t) {
if (t instanceof RuntimeException) {
return (RuntimeException) t;
public static RuntimeException convertToRuntime(Exception e) {
if (e instanceof RuntimeException) {
return (RuntimeException) e;
}
return new ElasticsearchException(t);
return new ElasticsearchException(e);
}
public static ElasticsearchException convertToElastic(Throwable t) {
if (t instanceof ElasticsearchException) {
return (ElasticsearchException) t;
public static ElasticsearchException convertToElastic(Exception e) {
if (e instanceof ElasticsearchException) {
return (ElasticsearchException) e;
}
return new ElasticsearchException(t);
return new ElasticsearchException(e);
}
public static RestStatus status(Throwable t) {
@ -89,15 +86,14 @@ public final class ExceptionsHelper {
return result;
}
/**
* @deprecated Don't swallow exceptions, allow them to propagate.
*/
@Deprecated
public static String detailedMessage(Throwable t) {
return detailedMessage(t, false, 0);
}
public static String detailedMessage(Throwable t, boolean newLines, int initialCounter) {
if (t == null) {
return "Unknown";
}
int counter = initialCounter + 1;
if (t.getCause() != null) {
StringBuilder sb = new StringBuilder();
while (t != null) {
@ -107,21 +103,11 @@ public final class ExceptionsHelper {
sb.append(t.getMessage());
sb.append("]");
}
if (!newLines) {
sb.append("; ");
}
sb.append("; ");
t = t.getCause();
if (t != null) {
if (newLines) {
sb.append("\n");
for (int i = 0; i < counter; i++) {
sb.append("\t");
}
} else {
sb.append("nested: ");
}
sb.append("nested: ");
}
counter++;
}
return sb.toString();
} else {
@ -175,8 +161,8 @@ public final class ExceptionsHelper {
}
public static IOException unwrapCorruption(Throwable t) {
return (IOException) unwrap(t, CorruptIndexException.class,
IndexFormatTooOldException.class,
return (IOException) unwrap(t, CorruptIndexException.class,
IndexFormatTooOldException.class,
IndexFormatTooNewException.class);
}
@ -220,7 +206,6 @@ public final class ExceptionsHelper {
return true;
}
/**
* Deduplicate the failures by exception message and index.
*/

View File

@ -69,6 +69,8 @@ public class Version {
public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_3_ID = 2030399;
public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_2_3_4_ID = 2030499;
public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
public static final int V_5_0_0_alpha1_ID = 5000001;
public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha2_ID = 5000002;
@ -77,7 +79,9 @@ public class Version {
public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
public static final int V_5_0_0_alpha4_ID = 5000004;
public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
public static final Version CURRENT = V_5_0_0_alpha4;
public static final int V_5_0_0_alpha5_ID = 5000005;
public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
public static final Version CURRENT = V_5_0_0_alpha5;
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
@ -90,6 +94,8 @@ public class Version {
public static Version fromId(int id) {
switch (id) {
case V_5_0_0_alpha5_ID:
return V_5_0_0_alpha5;
case V_5_0_0_alpha4_ID:
return V_5_0_0_alpha4;
case V_5_0_0_alpha3_ID:
@ -98,6 +104,8 @@ public class Version {
return V_5_0_0_alpha2;
case V_5_0_0_alpha1_ID:
return V_5_0_0_alpha1;
case V_2_3_4_ID:
return V_2_3_4;
case V_2_3_3_ID:
return V_2_3_3;
case V_2_3_2_ID:
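
A brief note on the constants above: the ids appear to follow the encoding major * 1,000,000 + minor * 10,000 + revision * 100 + build, where 99 marks a release and pre-release builds count up from 1. This reading is inferred from the values in the diff, not stated by it; a sketch for illustration only:

public class VersionIdSketch {
    // inferred encoding of the version id constants
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
    public static void main(String[] args) {
        assert versionId(2, 3, 4, 99) == 2030499; // V_2_3_4_ID
        assert versionId(5, 0, 0, 5) == 5000005;  // V_5_0_0_alpha5_ID
    }
}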

View File

@ -19,6 +19,8 @@
package org.elasticsearch.action;
import java.util.function.Consumer;
/**
* A listener for action responses or failures.
*/
@ -32,5 +34,32 @@ public interface ActionListener<Response> {
/**
* A failure caused by an exception at some phase of the task.
*/
void onFailure(Throwable e);
void onFailure(Exception e);
/**
* Creates a listener that listens for a response (or failure) and executes the
* corresponding consumer when the response (or failure) is received.
*
* @param onResponse the consumer of the response, when the listener receives one
* @param onFailure the consumer of the failure, when the listener receives one
* @param <Response> the type of the response
* @return a listener that listens for responses and invokes the consumer when received
*/
static <Response> ActionListener<Response> wrap(Consumer<Response> onResponse, Consumer<Exception> onFailure) {
return new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
try {
onResponse.accept(response);
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Exception e) {
onFailure.accept(e);
}
};
}
}
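
A small usage sketch of the new wrap helper (values are illustrative); note that an exception thrown by the response consumer is routed to the failure consumer by the implementation above:

import org.elasticsearch.action.ActionListener;

public class WrapExample {
    public static void main(String[] args) {
        ActionListener<String> listener = ActionListener.wrap(
                response -> System.out.println("got: " + response),
                e -> System.err.println("failed: " + e.getMessage()));
        listener.onResponse("ok");                 // prints "got: ok"
        listener.onFailure(new Exception("boom")); // prints "failed: boom"
        // an exception thrown inside the response consumer is routed to onFailure
    }
}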

View File

@ -20,7 +20,7 @@
package org.elasticsearch.action;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportResponse;
@ -31,7 +31,7 @@ import java.util.function.Supplier;
* A simple base class for action response listeners, defaulting to using the SAME executor (as is
* very common for response handlers).
*/
public class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {
public class ActionListenerResponseHandler<Response extends TransportResponse> implements TransportResponseHandler<Response> {
private final ActionListener<Response> listener;
private final Supplier<Response> responseSupplier;

View File

@ -19,6 +19,14 @@
package org.elasticsearch.action;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
@ -194,14 +202,114 @@ import org.elasticsearch.common.NamedRegistry;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.MapBinder;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.ActionPlugin.ActionHandler;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.action.admin.cluster.allocation.RestClusterAllocationExplainAction;
import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestCancelTasksAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestGetTaskAction;
import org.elasticsearch.rest.action.admin.cluster.node.tasks.RestListTasksAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestDeleteStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestGetStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.storedscripts.RestPutStoredScriptAction;
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction;
import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
import org.elasticsearch.rest.action.bulk.RestBulkAction;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.rest.action.cat.RestAliasAction;
import org.elasticsearch.rest.action.cat.RestAllocationAction;
import org.elasticsearch.rest.action.cat.RestCatAction;
import org.elasticsearch.rest.action.cat.RestFielddataAction;
import org.elasticsearch.rest.action.cat.RestHealthAction;
import org.elasticsearch.rest.action.cat.RestIndicesAction;
import org.elasticsearch.rest.action.cat.RestMasterAction;
import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
import org.elasticsearch.rest.action.cat.RestNodesAction;
import org.elasticsearch.rest.action.cat.RestPluginsAction;
import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
import org.elasticsearch.rest.action.cat.RestSegmentsAction;
import org.elasticsearch.rest.action.cat.RestShardsAction;
import org.elasticsearch.rest.action.cat.RestSnapshotAction;
import org.elasticsearch.rest.action.cat.RestTasksAction;
import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
import org.elasticsearch.rest.action.delete.RestDeleteAction;
import org.elasticsearch.rest.action.explain.RestExplainAction;
import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
import org.elasticsearch.rest.action.get.RestGetAction;
import org.elasticsearch.rest.action.get.RestGetSourceAction;
import org.elasticsearch.rest.action.get.RestHeadAction;
import org.elasticsearch.rest.action.get.RestMultiGetAction;
import org.elasticsearch.rest.action.index.RestIndexAction;
import org.elasticsearch.rest.action.ingest.RestDeletePipelineAction;
import org.elasticsearch.rest.action.ingest.RestGetPipelineAction;
import org.elasticsearch.rest.action.ingest.RestPutPipelineAction;
import org.elasticsearch.rest.action.ingest.RestSimulatePipelineAction;
import org.elasticsearch.rest.action.main.RestMainAction;
import org.elasticsearch.rest.action.search.RestClearScrollAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.suggest.RestSuggestAction;
import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
import org.elasticsearch.rest.action.update.RestUpdateAction;
import static java.util.Collections.unmodifiableList;
import static java.util.Collections.unmodifiableMap;
@ -212,21 +320,32 @@ import static java.util.Collections.unmodifiableMap;
public class ActionModule extends AbstractModule {
private final boolean transportClient;
private final Settings settings;
private final List<ActionPlugin> actionPlugins;
private final Map<String, ActionHandler<?, ?>> actions;
private final List<Class<? extends ActionFilter>> actionFilters;
private final AutoCreateIndex autoCreateIndex;
private final DestructiveOperations destructiveOperations;
private final RestController restController;
public ActionModule(boolean ingestEnabled, boolean transportClient, Settings settings, IndexNameExpressionResolver resolver,
ClusterSettings clusterSettings, List<ActionPlugin> actionPlugins) {
this.transportClient = transportClient;
this.settings = settings;
this.actionPlugins = actionPlugins;
actions = setupActions(actionPlugins);
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
restController = new RestController(settings, headers);
}
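As an aside on how those constructor inputs arrive: any installed ActionPlugin can contribute rest headers, which the RestController built above will carry through request handling. A minimal sketch, assuming getRestHeaders() returns a Collection<String> as the flatMap call above implies; the plugin class and header name are invented for illustration:

    import java.util.Collection;
    import java.util.Collections;
    import org.elasticsearch.plugins.ActionPlugin;
    import org.elasticsearch.plugins.Plugin;

    // Hypothetical plugin whose header set is picked up by the stream above.
    public class HeaderForwardingPlugin extends Plugin implements ActionPlugin {
        @Override
        public Collection<String> getRestHeaders() {
            return Collections.singleton("X-Trace-Id"); // invented header name
        }
    }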
private Map<String, ActionHandler<?, ?>> setupActions(List<ActionPlugin> actionPlugins) {
public Map<String, ActionHandler<?, ?>> getActions() {
return actions;
}
static Map<String, ActionHandler<?, ?>> setupActions(List<ActionPlugin> actionPlugins) {
// Subclass NamedRegistry for easy registration
class ActionRegistry extends NamedRegistry<ActionHandler<?, ?>> {
public ActionRegistry() {
@ -357,6 +476,149 @@ public class ActionModule extends AbstractModule {
return unmodifiableList(filters);
}
static Set<Class<? extends RestHandler>> setupRestHandlers(List<ActionPlugin> actionPlugins) {
Set<Class<? extends RestHandler>> handlers = new HashSet<>();
registerRestHandler(handlers, RestMainAction.class);
registerRestHandler(handlers, RestNodesInfoAction.class);
registerRestHandler(handlers, RestNodesStatsAction.class);
registerRestHandler(handlers, RestNodesHotThreadsAction.class);
registerRestHandler(handlers, RestClusterAllocationExplainAction.class);
registerRestHandler(handlers, RestClusterStatsAction.class);
registerRestHandler(handlers, RestClusterStateAction.class);
registerRestHandler(handlers, RestClusterHealthAction.class);
registerRestHandler(handlers, RestClusterUpdateSettingsAction.class);
registerRestHandler(handlers, RestClusterGetSettingsAction.class);
registerRestHandler(handlers, RestClusterRerouteAction.class);
registerRestHandler(handlers, RestClusterSearchShardsAction.class);
registerRestHandler(handlers, RestPendingClusterTasksAction.class);
registerRestHandler(handlers, RestPutRepositoryAction.class);
registerRestHandler(handlers, RestGetRepositoriesAction.class);
registerRestHandler(handlers, RestDeleteRepositoryAction.class);
registerRestHandler(handlers, RestVerifyRepositoryAction.class);
registerRestHandler(handlers, RestGetSnapshotsAction.class);
registerRestHandler(handlers, RestCreateSnapshotAction.class);
registerRestHandler(handlers, RestRestoreSnapshotAction.class);
registerRestHandler(handlers, RestDeleteSnapshotAction.class);
registerRestHandler(handlers, RestSnapshotsStatusAction.class);
registerRestHandler(handlers, RestIndicesExistsAction.class);
registerRestHandler(handlers, RestTypesExistsAction.class);
registerRestHandler(handlers, RestGetIndicesAction.class);
registerRestHandler(handlers, RestIndicesStatsAction.class);
registerRestHandler(handlers, RestIndicesSegmentsAction.class);
registerRestHandler(handlers, RestIndicesShardStoresAction.class);
registerRestHandler(handlers, RestGetAliasesAction.class);
registerRestHandler(handlers, RestAliasesExistAction.class);
registerRestHandler(handlers, RestIndexDeleteAliasesAction.class);
registerRestHandler(handlers, RestIndexPutAliasAction.class);
registerRestHandler(handlers, RestIndicesAliasesAction.class);
registerRestHandler(handlers, RestCreateIndexAction.class);
registerRestHandler(handlers, RestShrinkIndexAction.class);
registerRestHandler(handlers, RestRolloverIndexAction.class);
registerRestHandler(handlers, RestDeleteIndexAction.class);
registerRestHandler(handlers, RestCloseIndexAction.class);
registerRestHandler(handlers, RestOpenIndexAction.class);
registerRestHandler(handlers, RestUpdateSettingsAction.class);
registerRestHandler(handlers, RestGetSettingsAction.class);
registerRestHandler(handlers, RestAnalyzeAction.class);
registerRestHandler(handlers, RestGetIndexTemplateAction.class);
registerRestHandler(handlers, RestPutIndexTemplateAction.class);
registerRestHandler(handlers, RestDeleteIndexTemplateAction.class);
registerRestHandler(handlers, RestHeadIndexTemplateAction.class);
registerRestHandler(handlers, RestPutMappingAction.class);
registerRestHandler(handlers, RestGetMappingAction.class);
registerRestHandler(handlers, RestGetFieldMappingAction.class);
registerRestHandler(handlers, RestRefreshAction.class);
registerRestHandler(handlers, RestFlushAction.class);
registerRestHandler(handlers, RestSyncedFlushAction.class);
registerRestHandler(handlers, RestForceMergeAction.class);
registerRestHandler(handlers, RestUpgradeAction.class);
registerRestHandler(handlers, RestClearIndicesCacheAction.class);
registerRestHandler(handlers, RestIndexAction.class);
registerRestHandler(handlers, RestGetAction.class);
registerRestHandler(handlers, RestGetSourceAction.class);
registerRestHandler(handlers, RestHeadAction.Document.class);
registerRestHandler(handlers, RestHeadAction.Source.class);
registerRestHandler(handlers, RestMultiGetAction.class);
registerRestHandler(handlers, RestDeleteAction.class);
registerRestHandler(handlers, org.elasticsearch.rest.action.count.RestCountAction.class);
registerRestHandler(handlers, RestSuggestAction.class);
registerRestHandler(handlers, RestTermVectorsAction.class);
registerRestHandler(handlers, RestMultiTermVectorsAction.class);
registerRestHandler(handlers, RestBulkAction.class);
registerRestHandler(handlers, RestUpdateAction.class);
registerRestHandler(handlers, RestSearchAction.class);
registerRestHandler(handlers, RestSearchScrollAction.class);
registerRestHandler(handlers, RestClearScrollAction.class);
registerRestHandler(handlers, RestMultiSearchAction.class);
registerRestHandler(handlers, RestValidateQueryAction.class);
registerRestHandler(handlers, RestExplainAction.class);
registerRestHandler(handlers, RestRecoveryAction.class);
// Scripts API
registerRestHandler(handlers, RestGetStoredScriptAction.class);
registerRestHandler(handlers, RestPutStoredScriptAction.class);
registerRestHandler(handlers, RestDeleteStoredScriptAction.class);
registerRestHandler(handlers, RestFieldStatsAction.class);
// Tasks API
registerRestHandler(handlers, RestListTasksAction.class);
registerRestHandler(handlers, RestGetTaskAction.class);
registerRestHandler(handlers, RestCancelTasksAction.class);
// Ingest API
registerRestHandler(handlers, RestPutPipelineAction.class);
registerRestHandler(handlers, RestGetPipelineAction.class);
registerRestHandler(handlers, RestDeletePipelineAction.class);
registerRestHandler(handlers, RestSimulatePipelineAction.class);
// CAT API
registerRestHandler(handlers, RestCatAction.class);
registerRestHandler(handlers, RestAllocationAction.class);
registerRestHandler(handlers, RestShardsAction.class);
registerRestHandler(handlers, RestMasterAction.class);
registerRestHandler(handlers, RestNodesAction.class);
registerRestHandler(handlers, RestTasksAction.class);
registerRestHandler(handlers, RestIndicesAction.class);
registerRestHandler(handlers, RestSegmentsAction.class);
// Fully qualified to prevent interference with rest.action.count.RestCountAction
registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestCountAction.class);
// Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestRecoveryAction.class);
registerRestHandler(handlers, RestHealthAction.class);
registerRestHandler(handlers, org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class);
registerRestHandler(handlers, RestAliasAction.class);
registerRestHandler(handlers, RestThreadPoolAction.class);
registerRestHandler(handlers, RestPluginsAction.class);
registerRestHandler(handlers, RestFielddataAction.class);
registerRestHandler(handlers, RestNodeAttrsAction.class);
registerRestHandler(handlers, RestRepositoriesAction.class);
registerRestHandler(handlers, RestSnapshotAction.class);
for (ActionPlugin plugin : actionPlugins) {
for (Class<? extends RestHandler> handler : plugin.getRestHandlers()) {
registerRestHandler(handlers, handler);
}
}
return handlers;
}
private static void registerRestHandler(Set<Class<? extends RestHandler>> handlers, Class<? extends RestHandler> handler) {
if (handlers.contains(handler)) {
throw new IllegalArgumentException("can't register the same [rest_handler] more than once for [" + handler.getName() + "]");
}
handlers.add(handler);
}
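A quick sketch (not part of the commit) of what the guard above buys: double registration fails immediately instead of silently masking a handler.

    Set<Class<? extends RestHandler>> handlers = new HashSet<>();
    registerRestHandler(handlers, RestMainAction.class);
    // Second call throws IllegalArgumentException:
    // "can't register the same [rest_handler] more than once for [...RestMainAction]"
    registerRestHandler(handlers, RestMainAction.class);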
@Override
protected void configure() {
Multibinder<ActionFilter> actionFilterMultibinder = Multibinder.newSetBinder(binder(), ActionFilter.class);
@ -366,19 +628,12 @@ public class ActionModule extends AbstractModule {
bind(ActionFilters.class).asEagerSingleton();
bind(DestructiveOperations.class).toInstance(destructiveOperations);
// register Name -> GenericAction Map that can be injected to instances.
@SuppressWarnings("rawtypes")
MapBinder<String, GenericAction> actionsBinder
= MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
for (Map.Entry<String, ActionHandler<?, ?>> entry : actions.entrySet()) {
actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().getAction());
}
// register GenericAction -> transportAction Map that can be injected to instances.
// also register any supporting classes
if (false == transportClient) {
// Supporting classes only used when not a transport client
bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
bind(TransportLivenessAction.class).asEagerSingleton();
// register GenericAction -> transportAction Map used by NodeClient
@SuppressWarnings("rawtypes")
MapBinder<GenericAction, TransportAction> transportActionsBinder
= MapBinder.newMapBinder(binder(), GenericAction.class, TransportAction.class);
@ -390,6 +645,23 @@ public class ActionModule extends AbstractModule {
bind(supportAction).asEagerSingleton();
}
}
// Bind the RestController which is required (by Node) even if rest isn't enabled.
bind(RestController.class).toInstance(restController);
// Setup the RestHandlers
if (NetworkModule.HTTP_ENABLED.get(settings)) {
Multibinder<RestHandler> restHandlers = Multibinder.newSetBinder(binder(), RestHandler.class);
Multibinder<AbstractCatAction> catHandlers = Multibinder.newSetBinder(binder(), AbstractCatAction.class);
for (Class<? extends RestHandler> handler : setupRestHandlers(actionPlugins)) {
bind(handler).asEagerSingleton();
if (AbstractCatAction.class.isAssignableFrom(handler)) {
catHandlers.addBinding().to(handler.asSubclass(AbstractCatAction.class));
} else {
restHandlers.addBinding().to(handler);
}
}
}
}
}
}
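For readers unfamiliar with the Multibinder idiom used in configure(): any injected component can then receive the aggregated sets directly. A minimal sketch under that assumption; the consumer class below is invented for illustration:

    // Hypothetical consumer of the sets bound in configure().
    public class RestHandlerConsumer {
        @Inject
        public RestHandlerConsumer(Set<RestHandler> handlers, Set<AbstractCatAction> catHandlers) {
            // handlers: every non-cat handler bound above, as eager singletons
            // catHandlers: cat actions, additionally aggregated for RestCatAction's help output
        }
    }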

View File

@ -22,11 +22,11 @@ package org.elasticsearch.action;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
/**
* Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Throwable)} in case an uncaught
* Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
* exception or error is thrown while the actual action is run.
*/
public abstract class ActionRunnable<Response> extends AbstractRunnable {
protected final ActionListener<Response> listener;
public ActionRunnable(ActionListener<Response> listener) {
@ -34,11 +34,11 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {
}
/**
* Calls the action listeners {@link ActionListener#onFailure(Throwable)} method with the given exception.
* Calls the action listener's {@link ActionListener#onFailure(Exception)} method with the given exception.
* This method is invoked for all exceptions thrown by {@link #doRun()}
*/
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
public void onFailure(Exception e) {
listener.onFailure(e);
}
}
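To illustrate the narrowed signature: a subclass now only ever surfaces Exceptions to the listener, while Errors still propagate through AbstractRunnable. A sketch, assuming AbstractRunnable's protected doRun() throws Exception contract; computeResult() is a hypothetical helper:

    ActionRunnable<String> task = new ActionRunnable<String>(listener) {
        @Override
        protected void doRun() throws Exception {
            // any Exception thrown here lands in listener.onFailure(Exception)
            listener.onResponse(computeResult()); // computeResult() is invented for illustration
        }
    };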

View File

@ -45,7 +45,7 @@ public class LatchedActionListener<T> implements ActionListener<T> {
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
try {
delegate.onFailure(e);
} finally {

View File

@ -43,15 +43,15 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
private final long taskId;
private final Throwable reason;
private final Exception reason;
private final RestStatus status;
public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
public TaskOperationFailure(String nodeId, long taskId, Exception e) {
this.nodeId = nodeId;
this.taskId = taskId;
this.reason = t;
status = ExceptionsHelper.status(t);
this.reason = e;
status = ExceptionsHelper.status(e);
}
/**
@ -60,7 +60,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
public TaskOperationFailure(StreamInput in) throws IOException {
nodeId = in.readString();
taskId = in.readLong();
reason = in.readThrowable();
reason = in.readException();
status = RestStatus.readFrom(in);
}
@ -68,7 +68,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(nodeId);
out.writeLong(taskId);
out.writeThrowable(reason);
out.writeException(reason);
RestStatus.writeTo(out, status);
}
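A small sketch of the narrowed constructor in use; the node id and task id are illustrative:

    Exception cause = new ElasticsearchException("simulated task failure");
    // The REST status is derived internally via ExceptionsHelper.status(cause).
    TaskOperationFailure failure = new TaskOperationFailure("node-1", 42L, cause);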

View File

@ -35,7 +35,6 @@ public class TransportActionNodeProxy<Request extends ActionRequest, Response ex
private final GenericAction<Request, Response> action;
private final TransportRequestOptions transportOptions;
@Inject
public TransportActionNodeProxy(Settings settings, GenericAction<Request, Response> action, TransportService transportService) {
super(settings);
this.action = action;

View File

@ -52,6 +52,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
private Integer shard;
private Boolean primary;
private boolean includeYesDecisions = false;
private boolean includeDiskInfo = false;
/** Explain the first unassigned shard */
public ClusterAllocationExplainRequest() {
@ -134,6 +135,16 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
return this.includeYesDecisions;
}
/** {@code true} to include information about the gathered disk information of nodes in the cluster */
public void includeDiskInfo(boolean includeDiskInfo) {
this.includeDiskInfo = includeDiskInfo;
}
/** Returns true if information about disk usage and shard sizes should also be returned */
public boolean includeDiskInfo() {
return this.includeDiskInfo;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");
@ -164,6 +175,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
this.shard = in.readOptionalVInt();
this.primary = in.readOptionalBoolean();
this.includeYesDecisions = in.readBoolean();
this.includeDiskInfo = in.readBoolean();
}
@Override
@ -173,5 +185,6 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
out.writeOptionalVInt(shard);
out.writeOptionalBoolean(primary);
out.writeBoolean(includeYesDecisions);
out.writeBoolean(includeDiskInfo);
}
}

View File

@ -53,6 +53,18 @@ public class ClusterAllocationExplainRequestBuilder
return this;
}
/** Whether to include "YES" decider decisions in the response instead of only "NO" decisions */
public ClusterAllocationExplainRequestBuilder setIncludeYesDecisions(boolean includeYesDecisions) {
request.includeYesDecisions(includeYesDecisions);
return this;
}
/** Whether to include information about the gathered disk information of nodes in the cluster */
public ClusterAllocationExplainRequestBuilder setIncludeDiskInfo(boolean includeDiskInfo) {
request.includeDiskInfo(includeDiskInfo);
return this;
}
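Putting the two builder setters together — a sketch, assuming the usual prepareAllocationExplain() entry point on the cluster admin client and its existing setIndex/setShard/setPrimary setters; index name and shard id are illustrative:

    ClusterAllocationExplainResponse response = client.admin().cluster()
            .prepareAllocationExplain()
            .setIndex("my_index").setShard(0).setPrimary(true)
            .setIncludeYesDecisions(true)
            .setIncludeDiskInfo(true) // new: ships the ClusterInfo disk snapshot in the response
            .get();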
/**
* Signal that the first unassigned shard should be used
*/

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.allocation;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;
@ -48,10 +49,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
private final long allocationDelayMillis;
private final long remainingDelayMillis;
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
private final ClusterInfo clusterInfo;
public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
this.shard = shard;
this.primary = primary;
this.hasPendingAsyncFetch = hasPendingAsyncFetch;
@ -60,6 +62,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
this.allocationDelayMillis = allocationDelayMillis;
this.remainingDelayMillis = remainingDelayMillis;
this.nodeExplanations = nodeExplanations;
this.clusterInfo = clusterInfo;
}
public ClusterAllocationExplanation(StreamInput in) throws IOException {
@ -78,6 +81,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
}
this.nodeExplanations = nodeToExplanation;
if (in.readBoolean()) {
this.clusterInfo = new ClusterInfo(in);
} else {
this.clusterInfo = null;
}
}
@Override
@ -94,6 +102,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
for (NodeExplanation explanation : this.nodeExplanations.values()) {
explanation.writeTo(out);
}
if (this.clusterInfo != null) {
out.writeBoolean(true);
this.clusterInfo.writeTo(out);
} else {
out.writeBoolean(false);
}
}
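The boolean-prefixed guard above is the standard optional-field idiom on the transport wire; the stream constructor earlier in this file consumes it in exactly the same order. A generic sketch of the pairing (Value stands in for any Writeable with a StreamInput constructor):

    // write side: presence flag, then payload
    if (value == null) {
        out.writeBoolean(false);
    } else {
        out.writeBoolean(true);
        value.writeTo(out);
    }
    // read side: consume the flag first, at the same position in the stream
    Value value = in.readBoolean() ? new Value(in) : null;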
/** Return the shard that the explanation is about */
@ -143,6 +157,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.nodeExplanations;
}
/** Return the cluster disk info for the cluster or null if none available */
@Nullable
public ClusterInfo getClusterInfo() {
return this.clusterInfo;
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); {
builder.startObject("shard"); {
@ -164,11 +184,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
}
builder.startObject("nodes");
for (NodeExplanation explanation : nodeExplanations.values()) {
explanation.toXContent(builder, params);
builder.startObject("nodes"); {
for (NodeExplanation explanation : nodeExplanations.values()) {
explanation.toXContent(builder, params);
}
}
builder.endObject(); // end nodes
if (this.clusterInfo != null) {
builder.startObject("cluster_info"); {
this.clusterInfo.toXContent(builder, params);
}
builder.endObject(); // end "cluster_info"
}
}
builder.endObject(); // end wrapping object
return builder;

View File

@ -43,7 +43,7 @@ public class NodeExplanation implements Writeable, ToXContent {
private final String finalExplanation;
public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
@Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
final ClusterAllocationExplanation.FinalDecision finalDecision,
final String finalExplanation,
final ClusterAllocationExplanation.StoreCopy storeCopy) {

View File

@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -145,7 +146,7 @@ public class TransportClusterAllocationExplainAction
// No copies of the data
storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
} else {
final Throwable storeErr = storeStatus.getStoreException();
final Exception storeErr = storeStatus.getStoreException();
if (storeErr != null) {
if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;
@ -219,7 +220,7 @@ public class TransportClusterAllocationExplainAction
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
boolean includeYesDecisions, ShardsAllocator shardAllocator,
List<IndicesShardStoresResponse.StoreStatus> shardStores,
GatewayAllocator gatewayAllocator) {
GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// get the existing unassigned info if available
@ -262,16 +263,17 @@ public class TransportClusterAllocationExplainAction
explanations.put(node, nodeExplanation);
}
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
}
@Override
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
clusterInfoService.getClusterInfo(), System.nanoTime(), false);
clusterInfo, System.nanoTime(), false);
ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) {
@ -318,12 +320,13 @@ public class TransportClusterAllocationExplainAction
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
request.includeDiskInfo() ? clusterInfo : null);
listener.onResponse(new ClusterAllocationExplainResponse(cae));
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});

View File

@ -104,9 +104,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}
@Override
public void onFailure(String source, Throwable t) {
logger.error("unexpected failure during [{}]", t, source);
listener.onFailure(t);
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
listener.onFailure(e);
}
@Override

View File

@ -46,8 +46,6 @@ import static java.util.Collections.unmodifiableMap;
* Node information (static, does not change over time).
*/
public class NodeInfo extends BaseNodeResponse {
@Nullable
private Map<String, String> serviceAttributes;
private Version version;
private Build build;
@ -85,14 +83,13 @@ public class NodeInfo extends BaseNodeResponse {
public NodeInfo() {
}
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
@Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
@Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
@Nullable ByteSizeValue totalIndexingBuffer) {
super(node);
this.version = version;
this.build = build;
this.serviceAttributes = serviceAttributes;
this.settings = settings;
this.os = os;
this.process = process;
@ -127,14 +124,6 @@ public class NodeInfo extends BaseNodeResponse {
return this.build;
}
/**
* The service attributes of the node.
*/
@Nullable
public Map<String, String> getServiceAttributes() {
return this.serviceAttributes;
}
/**
* The settings of the node.
*/
@ -213,14 +202,6 @@ public class NodeInfo extends BaseNodeResponse {
} else {
totalIndexingBuffer = null;
}
if (in.readBoolean()) {
Map<String, String> builder = new HashMap<>();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
builder.put(in.readString(), in.readString());
}
serviceAttributes = unmodifiableMap(builder);
}
if (in.readBoolean()) {
settings = Settings.readSettingsFromStream(in);
}
@ -262,16 +243,6 @@ public class NodeInfo extends BaseNodeResponse {
out.writeBoolean(true);
out.writeLong(totalIndexingBuffer.bytes());
}
if (getServiceAttributes() == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeVInt(serviceAttributes.size());
for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
out.writeString(entry.getKey());
out.writeString(entry.getValue());
}
}
if (settings == null) {
out.writeBoolean(false);
} else {

View File

@ -73,12 +73,6 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
builder.byteSizeField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer());
}
if (nodeInfo.getServiceAttributes() != null) {
for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
builder.field(nodeAttribute.getKey(), nodeAttribute.getValue());
}
}
builder.startArray("roles");
for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
builder.value(role.getRoleName());

View File

@ -44,9 +44,9 @@ import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.tasks.TaskInfo;
import org.elasticsearch.tasks.TaskPersistenceService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportResponseHandler;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -111,7 +111,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
taskManager.registerChildTask(thisTask, node.getId());
transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
new BaseTransportResponseHandler<GetTaskResponse>() {
new TransportResponseHandler<GetTaskResponse>() {
@Override
public GetTaskResponse newInstance() {
return new GetTaskResponse();
@ -154,8 +154,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
} else {
@ -179,7 +179,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
/*
* We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If
* the error isn't a 404 then we'll just throw it back to the user.
@ -207,13 +207,13 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
public void onResponse(GetResponse getResponse) {
try {
onGetFinishedTaskFromIndex(getResponse, listener);
} catch (Throwable e) {
} catch (Exception e) {
listener.onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
// We haven't yet created the index for the task results so it can't be found.
listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", e, request.getTaskId()));

View File

@ -128,6 +128,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
* {@code group_by=nodes}.
*/
public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
// Why isn't this set by default?
this.discoveryNodes = discoveryNodes;
}

View File

@ -76,7 +76,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction<D
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});

View File

@ -80,7 +80,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeAction<PutR
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});

View File

@ -78,7 +78,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction<V
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});

View File

@ -102,9 +102,9 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to perform [{}]", t, source);
super.onFailure(source, t);
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
super.onFailure(source, e);
}
@Override

View File

@ -93,11 +93,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
}
@Override
public void onAllNodesAcked(@Nullable Throwable t) {
public void onAllNodesAcked(@Nullable Exception e) {
if (changed) {
reroute(true);
} else {
super.onAllNodesAcked(t);
super.onAllNodesAcked(e);
}
}
@ -146,10 +146,10 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
}
@Override
public void onFailure(String source, Throwable t) {
public void onFailure(String source, Exception e) {
//if the reroute fails we only log
logger.debug("failed to perform [{}]", t, source);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", t));
logger.debug("failed to perform [{}]", e, source);
listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
}
@Override
@ -165,9 +165,9 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
}
@Override
public void onFailure(String source, Throwable t) {
logger.debug("failed to perform [{}]", t, source);
super.onFailure(source, t);
public void onFailure(String source, Exception e) {
logger.debug("failed to perform [{}]", e, source);
super.onFailure(source, e);
}
@Override

View File

@ -94,10 +94,10 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
}
@Override
public void onSnapshotFailure(Snapshot snapshot, Throwable t) {
public void onSnapshotFailure(Snapshot snapshot, Exception e) {
if (snapshot.getRepository().equals(request.repository()) &&
snapshot.getSnapshotId().getName().equals(request.snapshot())) {
listener.onFailure(t);
listener.onFailure(e);
snapshotsService.removeListener(this);
}
}
@ -108,8 +108,8 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}

View File

@ -72,8 +72,8 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction<Del
}
@Override
public void onFailure(Throwable t) {
listener.onFailure(t);
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}

View File

@ -120,8 +120,8 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
}
listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
} catch (Throwable t) {
listener.onFailure(t);
} catch (Exception e) {
listener.onFailure(e);
}
}

View File

@ -94,7 +94,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
@ -104,7 +104,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
}
@Override
public void onFailure(Throwable t) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});

View File

@ -125,13 +125,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
List<SnapshotsInProgress.Entry> currentSnapshots =
snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses));
} catch (Throwable e) {
} catch (Exception e) {
listener.onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
@ -207,15 +207,14 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
.filter(s -> requestedSnapshotNames.contains(s.getName()))
.collect(Collectors.toMap(SnapshotId::getName, Function.identity()));
for (final String snapshotName : request.snapshots()) {
if (currentSnapshotNames.contains(snapshotName)) {
// we've already found this snapshot in the current snapshot entries, so skip over
continue;
}
SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
if (snapshotId == null) {
if (currentSnapshotNames.contains(snapshotName)) {
// we've already found this snapshot in the current snapshot entries, so skip over
continue;
} else {
// neither in the current snapshot entries nor found in the repository
throw new SnapshotMissingException(repositoryName, snapshotName);
}
// neither in the current snapshot entries nor found in the repository
throw new SnapshotMissingException(repositoryName, snapshotName);
}
SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();

View File

@ -119,7 +119,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
}
@Override
public void onFailure(Throwable t) {
public void onFailure(Exception e) {
logger.debug("failed to perform aliases", e);
listener.onFailure(e);
}

View File

@ -18,14 +18,22 @@
*/
package org.elasticsearch.action.admin.indices.analyze;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -39,11 +47,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
private String analyzer;
private String tokenizer;
private NameOrDefinition tokenizer;
private String[] tokenFilters = Strings.EMPTY_ARRAY;
private final List<NameOrDefinition> tokenFilters = new ArrayList<>();
private String[] charFilters = Strings.EMPTY_ARRAY;
private final List<NameOrDefinition> charFilters = new ArrayList<>();
private String field;
@ -51,6 +59,48 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
private String[] attributes = Strings.EMPTY_ARRAY;
public static class NameOrDefinition implements Writeable {
// exactly one of these two members is not null
public final String name;
public final Settings definition;
NameOrDefinition(String name) {
this.name = Objects.requireNonNull(name);
this.definition = null;
}
NameOrDefinition(Map<String, ?> definition) {
this.name = null;
Objects.requireNonNull(definition);
try {
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
builder.map(definition);
this.definition = Settings.builder().loadFromSource(builder.string()).build();
} catch (IOException e) {
throw new IllegalArgumentException("Failed to parse [" + definition + "]", e);
}
}
NameOrDefinition(StreamInput in) throws IOException {
name = in.readOptionalString();
if (in.readBoolean()) {
definition = Settings.readSettingsFromStream(in);
} else {
definition = null;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(name);
boolean isNotNullDefinition = this.definition != null;
out.writeBoolean(isNotNullDefinition);
if (isNotNullDefinition) {
Settings.writeSettingsToStream(definition, out);
}
}
}
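A sketch of the request surface this enables: named tokenizers and filters can now be mixed with anonymous, map-defined ones. The index name, text, and filter settings below are illustrative:

    Map<String, Object> stop = new HashMap<>();
    stop.put("type", "stop");
    stop.put("stopwords", Arrays.asList("a", "the"));

    AnalyzeRequest request = new AnalyzeRequest("my_index")
            .text("a quick brown fox")
            .tokenizer("standard")       // by name
            .addTokenFilter("lowercase") // by name
            .addTokenFilter(stop);       // anonymous definition, wrapped in NameOrDefinition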
public AnalyzeRequest() {
}
@ -82,35 +132,43 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
}
public AnalyzeRequest tokenizer(String tokenizer) {
this.tokenizer = tokenizer;
this.tokenizer = new NameOrDefinition(tokenizer);
return this;
}
public String tokenizer() {
public AnalyzeRequest tokenizer(Map<String, ?> tokenizer) {
this.tokenizer = new NameOrDefinition(tokenizer);
return this;
}
public NameOrDefinition tokenizer() {
return this.tokenizer;
}
public AnalyzeRequest tokenFilters(String... tokenFilters) {
if (tokenFilters == null) {
throw new IllegalArgumentException("token filters must not be null");
}
this.tokenFilters = tokenFilters;
public AnalyzeRequest addTokenFilter(String tokenFilter) {
this.tokenFilters.add(new NameOrDefinition(tokenFilter));
return this;
}
public String[] tokenFilters() {
public AnalyzeRequest addTokenFilter(Map<String, ?> tokenFilter) {
this.tokenFilters.add(new NameOrDefinition(tokenFilter));
return this;
}
public List<NameOrDefinition> tokenFilters() {
return this.tokenFilters;
}
public AnalyzeRequest charFilters(String... charFilters) {
if (charFilters == null) {
throw new IllegalArgumentException("char filters must not be null");
}
this.charFilters = charFilters;
public AnalyzeRequest addCharFilter(Map<String, ?> charFilter) {
this.charFilters.add(new NameOrDefinition(charFilter));
return this;
}
public String[] charFilters() {
public AnalyzeRequest addCharFilter(String charFilter) {
this.charFilters.add(new NameOrDefinition(charFilter));
return this;
}
public List<NameOrDefinition> charFilters() {
return this.charFilters;
}
@ -158,14 +216,12 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
super.readFrom(in);
text = in.readStringArray();
analyzer = in.readOptionalString();
tokenizer = in.readOptionalString();
tokenFilters = in.readStringArray();
charFilters = in.readStringArray();
tokenizer = in.readOptionalWriteable(NameOrDefinition::new);
tokenFilters.addAll(in.readList(NameOrDefinition::new));
charFilters.addAll(in.readList(NameOrDefinition::new));
field = in.readOptionalString();
if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
explain = in.readBoolean();
attributes = in.readStringArray();
}
explain = in.readBoolean();
attributes = in.readStringArray();
}
@Override
@ -173,13 +229,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
super.writeTo(out);
out.writeStringArray(text);
out.writeOptionalString(analyzer);
out.writeOptionalString(tokenizer);
out.writeStringArray(tokenFilters);
out.writeStringArray(charFilters);
out.writeOptionalWriteable(tokenizer);
out.writeList(tokenFilters);
out.writeList(charFilters);
out.writeOptionalString(field);
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
out.writeBoolean(explain);
out.writeStringArray(attributes);
}
out.writeBoolean(explain);
out.writeStringArray(attributes);
}
}

View File

@ -21,6 +21,8 @@ package org.elasticsearch.action.admin.indices.analyze;
import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import java.util.Map;
/**
*
*/
@ -54,7 +56,7 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
}
/**
* Instead of setting the analyzer, sets the tokenizer that will be used as part of a custom
* Instead of setting the analyzer, sets the tokenizer by name that will be used as part of a custom
* analyzer.
*/
public AnalyzeRequestBuilder setTokenizer(String tokenizer) {
@ -63,18 +65,43 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
}
/**
* Sets token filters that will be used on top of a tokenizer provided.
* Instead of setting the analyzer, sets the tokenizer using custom settings that will be used as part of a custom
* analyzer.
*/
public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
request.tokenFilters(tokenFilters);
public AnalyzeRequestBuilder setTokenizer(Map<String, ?> tokenizer) {
request.tokenizer(tokenizer);
return this;
}
/**
* Sets char filters that will be used before the tokenizer.
* Adds a token filter defined by custom settings on top of the provided tokenizer.
*/
public AnalyzeRequestBuilder setCharFilters(String... charFilters) {
request.charFilters(charFilters);
public AnalyzeRequestBuilder addTokenFilter(Map<String, ?> tokenFilter) {
request.addTokenFilter(tokenFilter);
return this;
}
/**
* Adds a token filter by name on top of the provided tokenizer.
*/
public AnalyzeRequestBuilder addTokenFilter(String tokenFilter) {
request.addTokenFilter(tokenFilter);
return this;
}
/**
* Adds a char filter defined by custom settings that will be applied before the tokenizer.
*/
public AnalyzeRequestBuilder addCharFilter(Map<String, ?> charFilter) {
request.addCharFilter(charFilter);
return this;
}
/**
* Adds a char filter by name that will be applied before the tokenizer.
*/
public AnalyzeRequestBuilder addCharFilter(String charFilter) {
request.addCharFilter(charFilter);
return this;
}
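And the equivalent through the builder — a sketch, assuming the indices admin client's prepareAnalyze(String text) entry point:

    AnalyzeResponse response = client.admin().indices()
            .prepareAnalyze("A Quick Brown Fox")
            .setTokenizer("whitespace")
            .addTokenFilter("lowercase")
            .get();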

View File

@ -25,23 +25,25 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;
@ -167,65 +169,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}
} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory;
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(request.tokenizer());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, request.tokenizer());
} else {
tokenizerFactory = analysisService.tokenizer(request.tokenizer());
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
}
}
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);
TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
if (request.tokenFilters() != null && request.tokenFilters().length > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
for (int i = 0; i < request.tokenFilters().length; i++) {
String tokenFilterName = request.tokenFilters()[i];
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilterName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilterName);
} else {
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilterName);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
}
tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);
CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
if (request.charFilters() != null && request.charFilters().length > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().length];
for (int i = 0; i < request.charFilters().length; i++) {
String charFilterName = request.charFilters()[i];
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilterName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilterName);
} else {
charFilterFactories[i] = analysisService.charFilter(charFilterName);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
}
charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);
analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;
@ -407,8 +357,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze (charFiltering)", e);
}
if (len > 0)
if (len > 0) {
sb.append(buf, 0, len);
}
} while (len == BUFFER_SIZE);
return sb.toString();
}
@ -436,7 +387,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
lastPosition = lastPosition + increment;
}
tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
lastOffset +offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
lastOffset + offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
}
stream.end();
@ -470,27 +421,164 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
private static Map<String, Object> extractExtendedAttributes(TokenStream stream, final Set<String> includeAttributes) {
final Map<String, Object> extendedAttributes = new TreeMap<>();
stream.reflectWith(new AttributeReflector() {
@Override
public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
if (CharTermAttribute.class.isAssignableFrom(attClass))
return;
if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
return;
if (OffsetAttribute.class.isAssignableFrom(attClass))
return;
if (TypeAttribute.class.isAssignableFrom(attClass))
return;
if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
extendedAttributes.put(key, value);
stream.reflectWith((attClass, key, value) -> {
if (CharTermAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (PositionIncrementAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (OffsetAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (TypeAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
extendedAttributes.put(key, value);
}
});
return extendedAttributes;
}
private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
for (int i = 0; i < request.charFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition charFilter = request.charFilters().get(i);
// parse anonymous settings
if (charFilter.definition != null) {
Settings settings = getAnonymousSettings(charFilter.definition);
String charFilterTypeName = settings.get("type");
if (charFilterTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous char filter: " + charFilter.definition);
}
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory =
analysisRegistry.getCharFilterProvider(charFilterTypeName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]");
}
// Need to set anonymous "name" of char_filter
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
charFilterFactories[i] = analysisService.charFilter(charFilter.name);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
}
}
return charFilterFactories;
}
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
for (int i = 0; i < request.tokenFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition tokenFilter = request.tokenFilters().get(i);
// parse anonymous settings
if (tokenFilter.definition != null) {
Settings settings = getAnonymousSettings(tokenFilter.definition);
String filterTypeName = settings.get("type");
if (filterTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous token filter: " + tokenFilter.definition);
}
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory =
analysisRegistry.getTokenFilterProvider(filterTypeName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]");
}
// Need to set anonymous "name" of tokenfilter
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilter.name);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
}
}
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find or create token filter under [" + tokenFilter.name + "]");
}
}
}
return tokenFilterFactories;
}
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
TokenizerFactory tokenizerFactory;
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
// parse anonymous settings
if (tokenizer.definition != null) {
Settings settings = getAnonymousSettings(tokenizer.definition);
String tokenizerTypeName = settings.get("type");
if (tokenizerTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous tokenizer: " + tokenizer.definition);
}
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =
analysisRegistry.getTokenizerProvider(tokenizerTypeName);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizerTypeName + "]");
}
// Need to set anonymous "name" of tokenizer
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
tokenizerFactory = analysisService.tokenizer(tokenizer.name);
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
}
}
}
return tokenizerFactory;
}
private static IndexSettings getNaIndexSettings(Settings settings) {
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
return new IndexSettings(metaData, Settings.EMPTY);
}
private static Settings getAnonymousSettings(Settings providerSetting) {
return Settings.builder().put(providerSetting)
// for _na_
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.build();
}
}
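To make the anonymous path concrete: a map definition travels as Settings (built in NameOrDefinition via loadFromSource) and is then padded with synthetic index metadata, so an IndexSettings can exist for a request that has no real index behind it. A sketch of the resulting settings, using the same keys set in getAnonymousSettings(); the "stop" filter type is illustrative:

    // Hypothetical: what an anonymous {"type": "stop"} filter definition becomes.
    Settings anonymous = Settings.builder()
            .put("type", "stop")
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
            .build();
    // getNaIndexSettings(anonymous) then wraps this in IndexMetaData under the _na_ uuid.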

View File

@ -107,7 +107,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
}
@Override
public void onFailure(Throwable t) {
public void onFailure(Exception e) {
logger.debug("failed to close indices [{}]", e, (Object) concreteIndices);
listener.onFailure(e);
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -55,6 +56,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final Set<ClusterBlock> blocks = new HashSet<>();
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
this.originalMessage = originalMessage;
@ -98,6 +101,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return this;
}
public CreateIndexClusterStateUpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
public TransportMessage originalMessage() {
return originalMessage;
}
@ -142,4 +150,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
public boolean updateAllTypes() {
return updateAllTypes;
}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
}
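A short sketch of the new knob in use; ActiveShardCount.from(int) and ALL are assumed from the ActiveShardCount API alongside the DEFAULT used above:

    // Require two active copies per shard before the create is acked.
    createRequest.waitForActiveShards(ActiveShardCount.from(2));
    // Or wait for all copies:
    createRequest.waitForActiveShards(ActiveShardCount.ALL);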

Some files were not shown because too many files have changed in this diff.