Merge branch 'master' into pr/15724-gce-network-host-master

commit 98fd5833cc
@@ -3,7 +3,11 @@ GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
in your new issue. Note that whether you're filing a bug report or a
feature request, ensure that your submission is for an
[OS that we support](https://www.elastic.co/support/matrix#show_os).
Bug reports on an OS that we do not support or feature requests
specific to an OS that we do not support will be closed.
-->

<!--

@@ -42,7 +42,7 @@ Vagrant.configure(2) do |config|
  # debian and it works fine.
  config.vm.define "debian-8" do |config|
    config.vm.box = "elastic/debian-8-x86_64"
    deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
    deb_common config, 'echo deb http://cloudfront.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
  end
  config.vm.define "centos-6" do |config|
    config.vm.box = "elastic/centos-6-x86_64"

@@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
    config.vm.box = "elastic/oraclelinux-7-x86_64"
    rpm_common config
  end
  config.vm.define "fedora-22" do |config|
    config.vm.box = "elastic/fedora-22-x86_64"
  config.vm.define "fedora-24" do |config|
    config.vm.box = "elastic/fedora-24-x86_64"
    dnf_common config
  end
  config.vm.define "opensuse-13" do |config|

@@ -31,7 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.gateway.GatewayAllocator;

@@ -102,7 +102,7 @@ public final class Allocators {
    }

    public static DiscoveryNode newNode(String nodeId, Map<String, String> attributes) {
        return new DiscoveryNode("", nodeId, DummyTransportAddress.INSTANCE, attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER,
        return new DiscoveryNode("", nodeId, LocalTransportAddress.buildUnique(), attributes, Sets.newHashSet(DiscoveryNode.Role.MASTER,
                DiscoveryNode.Role.DATA), Version.CURRENT);
    }
}

@@ -173,6 +173,11 @@ subprojects {
    "org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
    "org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
    "org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
    // for transport client
    "org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
    "org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
    "org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
    "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
  ]
  configurations.all {
    resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->

@@ -19,7 +19,6 @@
package org.elasticsearch.gradle

import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion

@@ -35,6 +34,7 @@ import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile

@@ -344,7 +344,7 @@ class BuildPlugin implements Plugin<Project> {

    /**Configuration generation of maven poms. */
    public static void configurePomGeneration(Project project) {
        project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
        project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded {
            project.publishing {
                publications {
                    all { MavenPublication publication -> // we only deal with maven

@@ -131,8 +131,9 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
    }

    private void response(Snippet response) {
        current.println("  - response_body: |")
        response.contents.eachLine { current.println("      $it") }
        current.println("  - match: ")
        current.println("      \$body: ")
        response.contents.eachLine { current.println("        $it") }
    }

    void emitDo(String method, String pathAndQuery,
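Note: the rewritten response() above replaces the custom response_body assertion with the standard match assertion against $body. A minimal Groovy sketch of the new emission, with hypothetical snippet contents and approximated indentation:

    def out = new StringWriter()
    def current = new PrintWriter(out)
    String contents = '{"acknowledged": true}' // hypothetical response snippet

    // mirrors the new response() emission
    current.println("  - match: ")
    current.println("      \$body: ")
    contents.eachLine { current.println("        $it") }
    current.flush()
    assert out.toString().contains('- match:')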
@@ -183,13 +184,6 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
        current.println('---')
        current.println("setup:")
        body(setup, true)
        // always wait for yellow before anything is executed
        current.println(
            "  - do:\n" +
            "      raw:\n" +
            "        method: GET\n" +
            "        path: \"_cluster/health\"\n" +
            "        wait_for_status: \"yellow\"")
    }

    private void body(Snippet snippet, boolean inSetup) {

@@ -18,14 +18,23 @@
 */
package org.elasticsearch.gradle.plugin

import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
 * Encapsulates build configuration for an Elasticsearch plugin.
 */

@@ -38,19 +47,35 @@ public class PluginBuildPlugin extends BuildPlugin {
        // this afterEvaluate must happen before the afterEvaluate added by integTest creation,
        // so that the file name resolution for installing the plugin will be setup
        project.afterEvaluate {
            boolean isModule = project.path.startsWith(':modules:')
            String name = project.pluginProperties.extension.name
            project.jar.baseName = name
            project.bundlePlugin.baseName = name

            if (project.pluginProperties.extension.hasClientJar) {
                // for plugins which work with the transport client, we copy the jar
                // file to a new name, copy the nebula generated pom to the same name,
                // and generate a different pom for the zip
                project.signArchives.enabled = false
                addJarPomGeneration(project)
                addClientJarTask(project)
                if (isModule == false) {
                    addZipPomGeneration(project)
                }
            } else {
                // no client plugin, so use the pom file from nebula, without jar, for the zip
                project.ext.set("nebulaPublish.maven.jar", false)
            }

            project.integTest.dependsOn(project.bundlePlugin)
            project.tasks.run.dependsOn(project.bundlePlugin)
            if (project.path.startsWith(':modules:')) {
            if (isModule) {
                project.integTest.clusterConfig.module(project)
                project.tasks.run.clusterConfig.module(project)
            } else {
                project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
                project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
                addPomGeneration(project)
                project.integTest.clusterConfig.plugin(project.path)
                project.tasks.run.clusterConfig.plugin(project.path)
                addZipPomGeneration(project)
            }

            project.namingConventions {

@@ -60,6 +85,7 @@ public class PluginBuildPlugin extends BuildPlugin {
        }
        createIntegTestTask(project)
        createBundleTask(project)
        project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime'))
        project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
    }

@@ -118,40 +144,93 @@ public class PluginBuildPlugin extends BuildPlugin {
        }
        project.assemble.dependsOn(bundle)

        // remove jar from the archives (things that will be published), and set it to the zip
        project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
        project.artifacts.add('archives', bundle)

        // also make the zip the default artifact (used when depending on this project)
        project.configurations.getByName('default').extendsFrom = []
        project.artifacts.add('default', bundle)
        // also make the zip available as a configuration (used when depending on this project)
        project.configurations.create('zip')
        project.artifacts.add('zip', bundle)
    }
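Note: publishing the bundle under an explicit 'zip' configuration (instead of rewiring the 'default' artifact) means consumers select it by name. A sketch of the consuming side, with an illustrative project path; the same wiring appears in the ClusterFormationTasks hunk below:

    // consuming build script (illustrative path)
    configurations {
        examplePluginZip
    }
    dependencies {
        examplePluginZip project(path: ':plugins:example-plugin', configuration: 'zip')
    }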

    /**
     * Adds the plugin jar and zip as publications.
     */
    protected static void addPomGeneration(Project project) {
        project.plugins.apply(MavenBasePublishPlugin.class)
        project.plugins.apply(MavenScmPlugin.class)
    /** Adds a task to move jar and associated files to a "-client" name. */
    protected static void addClientJarTask(Project project) {
        Task clientJar = project.tasks.create('clientJar')
        clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
        clientJar.doFirst {
            Path jarFile = project.jar.outputs.files.singleFile.toPath()
            String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
            Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)

            String pomFileName = jarFile.fileName.toString().replace('.jar', '.pom')
            String clientPomFileName = clientFileName.replace('.jar', '.pom')
            Files.copy(jarFile.resolveSibling(pomFileName), jarFile.resolveSibling(clientPomFileName),
                    StandardCopyOption.REPLACE_EXISTING)

            String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
            String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
            Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
                    StandardCopyOption.REPLACE_EXISTING)

            String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
            String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
            Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
                    StandardCopyOption.REPLACE_EXISTING)
        }
        project.assemble.dependsOn(clientJar)
    }
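Note: an illustration of the renames clientJar performs, using a hypothetical plugin name and the version bumped later in this diff:

    String version = '5.0.0-alpha5'                  // from the version.properties hunk below
    String jarName = "example-plugin-${version}.jar" // hypothetical jar file name
    assert jarName.replace(version, "client-${version}") ==
            'example-plugin-client-5.0.0-alpha5.jar'
    // the .pom, -sources.jar and -javadoc.jar siblings are copied the same way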

    static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)

    /** Find the reponame. */
    protected static String urlFromOrigin(String origin) {
        if (origin.startsWith('https')) {
            return origin
        }
        Matcher matcher = GIT_PATTERN.matcher(origin)
        if (matcher.matches()) {
            return "https://${matcher.group(1)}/${matcher.group(2)}"
        } else {
            return origin // best effort, the url doesnt really matter, it is just required by maven central
        }
    }
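Note: a quick sketch of urlFromOrigin on the two common origin shapes (example values only):

    // an SSH origin is rewritten to a browsable https URL via GIT_PATTERN
    assert urlFromOrigin('git@github.com:elastic/elasticsearch.git') ==
            'https://github.com/elastic/elasticsearch'
    // https origins (and anything unmatched) are returned unchanged
    assert urlFromOrigin('https://github.com/elastic/elasticsearch.git') ==
            'https://github.com/elastic/elasticsearch.git'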

    /** Adds nebula publishing task to generate a pom file for the plugin. */
    protected static void addJarPomGeneration(Project project) {
        project.plugins.apply(MavenPublishPlugin.class)

        project.publishing {
            publications {
                nebula {
                    artifact project.bundlePlugin
                    pom.withXml {
                        // overwrite the name/description in the pom nebula set up
                        Node root = asNode()
                        for (Node node : root.children()) {
                            if (node.name() == 'name') {
                                node.setValue(project.pluginProperties.extension.name)
                            } else if (node.name() == 'description') {
                                node.setValue(project.pluginProperties.extension.description)
                            }
                        }
                jar(MavenPublication) {
                    from project.components.java
                    pom.withXml { XmlProvider xml ->
                        Node root = xml.asNode()
                        root.appendNode('name', project.pluginProperties.extension.name)
                        root.appendNode('description', project.pluginProperties.extension.description)
                        root.appendNode('url', urlFromOrigin(project.scminfo.origin))
                        Node scmNode = root.appendNode('scm')
                        scmNode.appendNode('url', project.scminfo.origin)
                    }
                }
            }
        }
    }

    /** Adds a task to generate a*/
    protected void addZipPomGeneration(Project project) {
        project.plugins.apply(MavenPublishPlugin.class)

        project.publishing {
            publications {
                zip(MavenPublication) {
                    artifact project.bundlePlugin
                    pom.packaging = 'pom'
                    pom.withXml { XmlProvider xml ->
                        Node root = xml.asNode()
                        root.appendNode('name', project.pluginProperties.extension.name)
                        root.appendNode('description', project.pluginProperties.extension.description)
                        root.appendNode('url', urlFromOrigin(project.scminfo.origin))
                        Node scmNode = root.appendNode('scm')
                        scmNode.appendNode('url', project.scminfo.origin)
                    }
                }
            }
        }
    }
}

@@ -39,6 +39,10 @@ class PluginPropertiesExtension {
    @Input
    String classname

    /** Indicates whether the plugin jar should be made available for the transport client. */
    @Input
    boolean hasClientJar = false

    PluginPropertiesExtension(Project project) {
        name = project.name
        version = project.version
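Note: a hedged sketch of how a plugin build might opt in, assuming the extension is registered under the usual esplugin name (all values illustrative):

    esplugin {
        name 'example-plugin'
        description 'An example plugin'
        classname 'org.example.ExamplePlugin'
        hasClientJar = true // enables addClientJarTask/addJarPomGeneration above
    }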

@@ -20,12 +20,15 @@ package org.elasticsearch.gradle.test

import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input

/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {

    private final Project project

    @Input
    String distribution = 'integ-test-zip'

@@ -77,6 +80,10 @@ class ClusterConfiguration {
        return tmpFile.exists()
    }

    public ClusterConfiguration(Project project) {
        this.project = project
    }

    Map<String, String> systemProperties = new HashMap<>()

    Map<String, String> settings = new HashMap<>()

@@ -84,7 +91,7 @@ class ClusterConfiguration {
    // map from destination path, to source file
    Map<String, Object> extraConfigFiles = new HashMap<>()

    LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
    LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()

    List<Project> modules = new ArrayList<>()

@@ -101,13 +108,9 @@ class ClusterConfiguration {
    }

    @Input
    void plugin(String name, FileCollection file) {
        plugins.put(name, file)
    }

    @Input
    void plugin(String name, Project pluginProject) {
        plugins.put(name, pluginProject)
    void plugin(String path) {
        Project pluginProject = project.project(path)
        plugins.put(pluginProject.name, pluginProject)
    }

    /** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
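Note: the single-argument plugin(String path) moves name resolution into ClusterConfiguration itself; the call-site change is visible in the PluginBuildPlugin hunk above and looks like this (illustrative path):

    // before: name plus the bundled zip's files
    project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
    // after: just the project path; name and zip come from the project
    project.integTest.clusterConfig.plugin(':plugins:example-plugin')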

@@ -167,7 +167,7 @@ class ClusterFormationTasks {
        }

        // install plugins
        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
            String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
            setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
        }

@@ -326,38 +326,34 @@ class ClusterFormationTasks {
        Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)

        List<FileCollection> pluginFiles = []
        for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
            FileCollection pluginZip
            if (plugin.getValue() instanceof Project) {
                Project pluginProject = plugin.getValue()
                if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
                    throw new GradleException("Task ${name} cannot project ${pluginProject.path} which is not an esplugin")
                }
                String configurationName = "_plugin_${pluginProject.path}"
                Configuration configuration = project.configurations.findByName(configurationName)
                if (configuration == null) {
                    configuration = project.configurations.create(configurationName)
                }
                project.dependencies.add(configurationName, pluginProject)
                setup.dependsOn(pluginProject.tasks.bundlePlugin)
                pluginZip = configuration
        for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {

                // also allow rest tests to use the rest spec from the plugin
                Copy copyRestSpec = null
                for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
                    File restApiDir = new File(resourceDir, 'rest-api-spec/api')
                    if (restApiDir.exists() == false) continue
                    if (copyRestSpec == null) {
                        copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
                        copyPlugins.dependsOn(copyRestSpec)
                        copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
                    }
                    copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
                }
            } else {
                pluginZip = plugin.getValue()
            Project pluginProject = plugin.getValue()
            if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
                throw new GradleException("Task ${name} cannot project ${pluginProject.path} which is not an esplugin")
            }
            pluginFiles.add(pluginZip)
            String configurationName = "_plugin_${pluginProject.path}"
            Configuration configuration = project.configurations.findByName(configurationName)
            if (configuration == null) {
                configuration = project.configurations.create(configurationName)
            }
            project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
            setup.dependsOn(pluginProject.tasks.bundlePlugin)

            // also allow rest tests to use the rest spec from the plugin
            String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
            Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
            for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
                File restApiDir = new File(resourceDir, 'rest-api-spec/api')
                if (restApiDir.exists() == false) continue
                if (copyRestSpec == null) {
                    copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
                    copyPlugins.dependsOn(copyRestSpec)
                    copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
                }
                copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
            }
            pluginFiles.add(configuration)
        }

        copyPlugins.into(node.pluginsTmpDir)

@@ -379,15 +375,10 @@ class ClusterFormationTasks {
        return installModule
    }

    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
        FileCollection pluginZip
        if (plugin instanceof Project) {
            pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
        } else {
            pluginZip = plugin
        }
    static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
        FileCollection pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
        // delay reading the file location until execution time by wrapping in a closure within a GString
        String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
        Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
        Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
        return configureExecTask(name, project, setup, node, args)
    }
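Note: typing file as Object rather than String is what keeps the GString lazy; a String-typed assignment coerces the GString (and runs its closure) immediately. A self-contained Groovy sketch:

    def dir = 'tmp-at-config-time'
    String eager = "file: ${-> dir}" // String-typed: closure evaluated once, here
    Object lazy = "file: ${-> dir}"  // Object-typed: still a GString, evaluated on use
    dir = 'tmp-at-exec-time'
    assert eager == 'file: tmp-at-config-time'
    assert lazy.toString() == 'file: tmp-at-exec-time'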
@@ -32,7 +32,7 @@ import org.gradle.util.ConfigureUtil
 */
public class RestIntegTestTask extends RandomizedTestingTask {

    ClusterConfiguration clusterConfig = new ClusterConfiguration()
    ClusterConfiguration clusterConfig

    /** Flag indicating whether the rest tests in the rest spec should be run. */
    @Input

@@ -44,6 +44,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
        dependsOn(project.testClasses)
        classpath = project.sourceSets.test.runtimeClasspath
        testClassesDir = project.sourceSets.test.output.classesDir
        clusterConfig = new ClusterConfiguration(project)

        // start with the common test configuration
        configure(BuildPlugin.commonTestConfig(project))

@@ -7,11 +7,15 @@ import org.gradle.util.ConfigureUtil

public class RunTask extends DefaultTask {

    ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
    ClusterConfiguration clusterConfig

    public RunTask() {
        description = "Runs elasticsearch with '${project.path}'"
        group = 'Verification'
        clusterConfig = new ClusterConfiguration(project)
        clusterConfig.httpPort = 9200
        clusterConfig.transportPort = 9300
        clusterConfig.daemonize = false
        project.afterEvaluate {
            ClusterFormationTasks.setup(project, this, clusterConfig)
        }

@@ -39,6 +39,27 @@

  <module name="EqualsHashCode" />

  <!-- Checks that the order of modifiers conforms to the suggestions in the
    Java Language specification, sections 8.1.1, 8.3.1 and 8.4.3. It is not that
    the standard is perfect, but having a consistent order makes the code more
    readable and no other order is compellingly better than the standard.
    The correct order is:
      public
      protected
      private
      abstract
      static
      final
      transient
      volatile
      synchronized
      native
      strictfp
  -->
  <module name="ModifierOrder" />

  <module name="RedundantModifier" />

  <!-- We don't use Java's builtin serialization and we suppress all warning
    about it. The flip side of that coin is that we shouldn't _try_ to use
    it. We can't outright ban it with ForbiddenApis because it complain about
@@ -233,7 +233,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]node[/\\]NodeClient.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />

@@ -266,7 +265,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataMappingService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]MetaDataUpdateSettingsService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]RepositoriesMetaData.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]node[/\\]DiscoveryNodes.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexRoutingTable.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]IndexShardRoutingTable.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]routing[/\\]OperationRouting.java" checks="LineLength" />

@@ -341,12 +339,9 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoveryService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoverySettings.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]local[/\\]LocalDiscovery.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]NodeJoinController.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscovery.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]elect[/\\]ElectMasterService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]FaultDetection.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]MasterFaultDetection.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]fd[/\\]NodesFaultDetection.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]membership[/\\]MembershipAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ping[/\\]ZenPing.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PendingClusterStatesQueue.java" checks="LineLength" />

@@ -357,7 +352,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]LocalAllocateDangledIndices.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]MetaDataStateFormat.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]PrimaryShardAllocator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]TransportNodesListGatewayMetaState.java" checks="LineLength" />

@@ -561,7 +555,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />

@@ -581,10 +574,7 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]InternalRange.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]date[/\\]InternalDateRange.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]ipv4[/\\]InternalIPv4Range.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedBytesHashSamplerAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedMapSamplerAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedNumericSamplerAggregator.java" checks="LineLength" />

@@ -592,46 +582,34 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]InternalSignificantTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantLongTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantStringTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]UnmappedSignificantTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]GND.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]NXYSignificanceHeuristic.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]PercentageScore.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]ScriptHeuristic.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]SignificanceHeuristic.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]AbstractTermsParametersParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]GlobalOrdinalsStringTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalOrder.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregatorFactory.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsParametersParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]UnmappedTerms.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]support[/\\]IncludeExclude.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]ValuesSourceMetricsAggregationBuilder.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]CardinalityAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]GeoBoundsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]InternalGeoBounds.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]AbstractTDigestPercentilesAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentileRanksAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentilesAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]scripted[/\\]InternalScriptedMetric.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]scripted[/\\]ScriptedMetricAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]stats[/\\]extended[/\\]ExtendedStatsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]tophits[/\\]TopHitsAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]bucketscript[/\\]BucketScriptPipelineAggregator.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]derivative[/\\]InternalDerivative.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
@@ -762,7 +740,6 @@
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]ShardsAllocatorModuleIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]SimpleAllocationIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterIndexHealthTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterStateHealthTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]AutoExpandReplicasTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]DateMathExpressionResolverTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]HumanReadableIndexSettingsTests.java" checks="LineLength" />

@@ -848,7 +825,6 @@
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]BlockingClusterStatePublishResponseHandlerTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]DiscoveryWithServiceDisruptionsIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]ZenUnicastDiscoveryIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]NodeJoinControllerTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryUnitTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]publish[/\\]PublishClusterStateActionTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]document[/\\]DocumentActionsIT.java" checks="LineLength" />

@@ -869,7 +845,6 @@
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]netty[/\\]NettyHttpServerPipeliningTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModuleTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexWithShadowReplicasIT.java" checks="LineLength" />

@@ -1060,7 +1035,6 @@
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificanceHeuristicTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AvgIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]SumIT.java" checks="LineLength" />

@@ -1132,16 +1106,10 @@
  <suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyPlugin.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentileRanksTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentilesTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HistogramTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IPv4RangeTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndexLookupTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndicesRequestTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]LongTermsTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinDocCountTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinTests.java" checks="LineLength" />

@@ -1150,8 +1118,6 @@
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SearchFieldsTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SimpleSortTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]StringTermsTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentileRanksTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentilesTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptTests.java" checks="LineLength" />
  <suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />

@@ -1207,10 +1173,6 @@
  <suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapper.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperTests.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-murmur3[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]murmur3[/\\]Murmur3FieldMapperUpgradeTests.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-size[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeFieldMapper.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeFieldMapperUpgradeTests.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeMappingIT.java" checks="LineLength" />
  <suppress files="plugins[/\\]mapper-size[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]size[/\\]SizeMappingTests.java" checks="LineLength" />
  <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]blobstore[/\\]AzureBlobContainer.java" checks="LineLength" />
  <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]blobstore[/\\]AzureBlobStore.java" checks="LineLength" />
  <suppress files="plugins[/\\]repository-azure[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cloud[/\\]azure[/\\]storage[/\\]AzureStorageServiceImpl.java" checks="LineLength" />

@@ -1263,26 +1225,8 @@
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]engine[/\\]MockEngineSupport.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]hamcrest[/\\]ElasticsearchAssertions.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]junit[/\\]listeners[/\\]LoggingListener.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]json[/\\]JsonPath.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanEqualToParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]GreaterThanParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanOrEqualToParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]LessThanParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParseContext.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]parser[/\\]RestTestSuiteParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]GreaterThanEqualToAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LengthAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]LessThanOrEqualToAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]section[/\\]MatchAssertion.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]spec[/\\]RestApiParser.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]support[/\\]FileUtils.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSDirectoryService.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]store[/\\]MockFSIndexStore.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]FileUtilsTests.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]JsonPathTests.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]rest[/\\]test[/\\]RestTestParserTests.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]test[/\\]InternalTestClusterTests.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CliTool.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]admin[/\\]indices[/\\]settings[/\\]RestGetSettingsAction.java" checks="LineLength" />
@@ -36,7 +36,7 @@ public class NamingConventionsCheckBadClasses {
public void testDummy() {}
}

public static abstract class DummyAbstractTests extends UnitTestCase {
public abstract static class DummyAbstractTests extends UnitTestCase {
}

public interface DummyInterfaceTests {

@@ -1,4 +1,4 @@
elasticsearch = 5.0.0-alpha4
elasticsearch = 5.0.0-alpha5
lucene = 6.1.0

# optional dependencies

@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7

group = 'org.elasticsearch.client'

dependencies {
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"

@@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase {

final static String METHOD_NAME = HttpDelete.METHOD_NAME;
static final String METHOD_NAME = HttpDelete.METHOD_NAME;

HttpDeleteWithEntity(final URI uri) {
setURI(uri);

@@ -28,7 +28,7 @@ import java.net.URI;
*/
final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {

final static String METHOD_NAME = HttpGet.METHOD_NAME;
static final String METHOD_NAME = HttpGet.METHOD_NAME;

HttpGetWithEntity(final URI uri) {
setURI(uri);

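Both helpers exist because HttpClient's stock HttpGet/HttpDelete cannot carry a request body, while Elasticsearch accepts one (for example a search body sent with GET). A minimal illustrative sketch; note the classes are package-private and only used inside the client, and the endpoint and JSON below are placeholders:

    // hypothetical endpoint and body, for illustration only
    HttpGetWithEntity request = new HttpGetWithEntity(URI.create("/index/_search"));
    request.setEntity(new StringEntity("{\"query\":{\"match_all\":{}}}", ContentType.APPLICATION_JSON));
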
@@ -26,6 +26,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.entity.ContentType;
@@ -55,7 +56,7 @@ final class RequestLogger {
*/
static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() +
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
"] returned [" + httpResponse.getStatusLine() + "]");
}
if (tracer.isTraceEnabled()) {
@@ -81,8 +82,10 @@ final class RequestLogger {
* Logs a request that failed
*/
static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] failed", e);
if (logger.isTraceEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
}
if (tracer.isTraceEnabled()) {
String traceRequest;
try {
traceRequest = buildTraceRequest(request, host);
@@ -98,7 +101,7 @@ final class RequestLogger {
* Creates curl output for given request
*/
static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException {
String requestLine = "curl -iX " + request.getMethod() + " '" + host + request.getRequestLine().getUri() + "'";
String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'";
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
if (enclosingRequest.getEntity() != null) {
@@ -143,4 +146,11 @@ final class RequestLogger {
}
return responseLine;
}

private static String getUri(RequestLine requestLine) {
if (requestLine.getUri().charAt(0) != '/') {
return "/" + requestLine.getUri();
}
return requestLine.getUri();
}
}

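The new getUri helper guards the log and curl output against request lines whose URI lacks a leading slash. The same normalization written out standalone, since getUri itself is private to RequestLogger; BasicRequestLine is only a stand-in for a parsed request line here:

    RequestLine requestLine = new BasicRequestLine("GET", "index/type/_api", HttpVersion.HTTP_1_1);
    // equivalent of getUri: prefix a slash when it is missing
    String uri = requestLine.getUri().startsWith("/") ? requestLine.getUri() : "/" + requestLine.getUri();
    assert uri.equals("/index/type/_api");
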
@@ -37,8 +37,6 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
@@ -91,7 +89,7 @@ public final class RestClient implements Closeable {
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;

private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
HttpHost[] hosts, FailureListener failureListener) {
this.client = client;
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
@@ -117,6 +115,39 @@ public final class RestClient implements Closeable {
this.blacklist.clear();
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
return performRequest(method, endpoint, params, null, headers);
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
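The two new overloads make the common no-body calls shorter. A usage sketch (host and endpoints are placeholders; Response is closeable at this point in the client's history, as the tests further down show):

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build();
    try (Response response = restClient.performRequest("GET", "/_cluster/health")) {
        // no params, no body
        System.out.println(response.getStatusLine());
    }
    try (Response response = restClient.performRequest("GET", "/_search",
            Collections.singletonMap("size", "10"))) {
        // params, no body
        System.out.println(response.getStatusLine());
    }
    restClient.close();
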
@@ -360,10 +391,11 @@ public final class RestClient implements Closeable {
private static final Header[] EMPTY_HEADERS = new Header[0];

private final HttpHost[] hosts;
private CloseableHttpClient httpClient;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;

/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
@@ -375,17 +407,6 @@ public final class RestClient implements Closeable {
this.hosts = hosts;
}

/**
* Sets the http client. A new default one will be created if not
* specified, by calling {@link #createDefaultHttpClient(Registry)})}.
*
* @see CloseableHttpClient
*/
public Builder setHttpClient(CloseableHttpClient httpClient) {
this.httpClient = httpClient;
return this;
}

/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
@@ -401,12 +422,10 @@ public final class RestClient implements Closeable {
}

/**
* Sets the default request headers, to be used when creating the default http client instance.
* In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be
* set to it externally during http client construction.
* Sets the default request headers, to be sent with every request unless overridden on a per request basis
*/
public Builder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "default headers must not be null");
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
}
@@ -418,48 +437,94 @@ public final class RestClient implements Closeable {
* Sets the {@link FailureListener} to be notified for each request failure
*/
public Builder setFailureListener(FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failure listener must not be null");
Objects.requireNonNull(failureListener, "failureListener must not be null");
this.failureListener = failureListener;
return this;
}

/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
*/
public Builder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
this.httpClientConfigCallback = httpClientConfigCallback;
return this;
}

/**
* Sets the {@link RequestConfigCallback} to be used to customize http client configuration
*/
public Builder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
this.requestConfigCallback = requestConfigCallback;
return this;
}

/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (httpClient == null) {
httpClient = createDefaultHttpClient(null);
}
if (failureListener == null) {
failureListener = new FailureListener();
}
CloseableHttpClient httpClient = createHttpClient();
return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
}

/**
* Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
*
* @see CloseableHttpClient
*/
public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
PoolingHttpClientConnectionManager connectionManager;
if (socketFactoryRegistry == null) {
connectionManager = new PoolingHttpClientConnectionManager();
} else {
connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
private CloseableHttpClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);

if (requestConfigCallback != null) {
requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
RequestConfig requestConfig = requestConfigBuilder.build();

PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);

//default timeouts are all infinite
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
HttpClientBuilder httpClientBuilder = HttpClientBuilder.create().setConnectionManager(connectionManager)
.setDefaultRequestConfig(requestConfig);

if (httpClientConfigCallback != null) {
httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
return httpClientBuilder.build();
}
}

/**
* Callback used to customize the default {@link RequestConfig} being set to the {@link CloseableHttpClient}
* @see HttpClientBuilder#setDefaultRequestConfig
*/
public interface RequestConfigCallback {
/**
* Allows to customize the {@link RequestConfig} that will be used with each request.
* It is common to customize the different timeout values through this method without losing any other useful default
* value that the {@link RestClient.Builder} internally sets.
*/
void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder);
}

/**
* Callback used to customize the {@link CloseableHttpClient} instance used by a {@link RestClient} instance.
* Allows to customize default {@link RequestConfig} being set to the client and any parameter that
* can be set through {@link HttpClientBuilder}
*/
public interface HttpClientConfigCallback {
/**
* Allows to customize the {@link CloseableHttpClient} being created and used by the {@link RestClient}.
* It is common to customize the default {@link org.apache.http.client.CredentialsProvider} through this method,
* without losing any other useful default value that the {@link RestClient.Builder} internally sets.
* Also useful to setup ssl through {@link SSLSocketFactoryHttpConfigCallback}.
*/
void customizeHttpClient(HttpClientBuilder httpClientBuilder);
}

/**
* Listener that allows to be notified whenever a failure happens. Useful when sniffing is enabled, so that we can sniff on failure.
* The default implementation is a no-op.

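Taken together, the builder now owns http client creation and the two callbacks are the extension points. A usage sketch in the client's Java 7 style; the timeout value is an arbitrary example:

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
            .setRequestConfigCallback(new RestClient.RequestConfigCallback() {
                @Override
                public void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                    // override one default without losing the others the builder sets
                    requestConfigBuilder.setSocketTimeout(60000);
                }
            })
            .setHttpClientConfigCallback(new RestClient.HttpClientConfigCallback() {
                @Override
                public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
                    // e.g. set a CredentialsProvider here
                }
            })
            .build();
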
@@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.client;

import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;

/**
* Helps configuring the http client when needing to communicate over ssl. It effectively replaces the connection manager
* with one that has ssl properly configured thanks to the provided {@link SSLConnectionSocketFactory}.
*/
public class SSLSocketFactoryHttpConfigCallback implements RestClient.HttpClientConfigCallback {

private final SSLConnectionSocketFactory sslSocketFactory;

public SSLSocketFactoryHttpConfigCallback(SSLConnectionSocketFactory sslSocketFactory) {
this.sslSocketFactory = sslSocketFactory;
}

@Override
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.getSocketFactory())
.register("https", sslSocketFactory).build();
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);
httpClientBuilder.setConnectionManager(connectionManager);
}
}

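Wiring it up is one builder call. A sketch, assuming the JSSE default socket factory is acceptable; a real deployment would build an SSLConnectionSocketFactory from its own key material:

    SSLConnectionSocketFactory sslSocketFactory = SSLConnectionSocketFactory.getSocketFactory();
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "https"))
            .setHttpClientConfigCallback(new SSLSocketFactoryHttpConfigCallback(sslSocketFactory))
            .build();
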
@@ -50,7 +50,14 @@ public class RequestLoggerTests extends RestClientTestCase {

public void testTraceRequest() throws IOException, URISyntaxException {
HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
URI uri = new URI("/index/type/_api");

String expectedEndpoint = "/index/type/_api";
URI uri;
if (randomBoolean()) {
uri = new URI(expectedEndpoint);
} else {
uri = new URI("index/type/_api");
}

HttpRequestBase request;
int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);
@@ -83,7 +90,7 @@ public class RequestLoggerTests extends RestClientTestCase {
throw new UnsupportedOperationException();
}

String expected = "curl -iX " + request.getMethod() + " '" + host + uri + "'";
String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
String requestBody = "{ \"field\": \"value\" }";
if (hasBody) {

@@ -22,6 +22,7 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicHeader;

@@ -67,7 +68,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("default headers must not be null", e.getMessage());
assertEquals("defaultHeaders must not be null", e.getMessage());
}

try {
@@ -81,7 +82,21 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("failure listener must not be null", e.getMessage());
assertEquals("failureListener must not be null", e.getMessage());
}

try {
RestClient.builder(new HttpHost("localhost", 9200)).setHttpClientConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("httpClientConfigCallback must not be null", e.getMessage());
}

try {
RestClient.builder(new HttpHost("localhost", 9200)).setRequestConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("requestConfigCallback must not be null", e.getMessage());
}

int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
@@ -91,7 +106,18 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
RestClient.Builder builder = RestClient.builder(hosts);
if (getRandom().nextBoolean()) {
builder.setHttpClient(HttpClientBuilder.create().build());
builder.setHttpClientConfigCallback(new RestClient.HttpClientConfigCallback() {
@Override
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
}
});
}
if (getRandom().nextBoolean()) {
builder.setRequestConfigCallback(new RestClient.RequestConfigCallback() {
@Override
public void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
}
});
}
if (getRandom().nextBoolean()) {
int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);

@@ -19,8 +19,8 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.ProtocolVersion;
@@ -92,7 +92,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
httpHosts[i] = new HttpHost("localhost", 9200 + i);
}
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build();
restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
}

public void testRoundRobinOkStatusCodes() throws Exception {
@@ -102,8 +102,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
Collections.addAll(hostsSet, httpHosts);
for (int j = 0; j < httpHosts.length; j++) {
int statusCode = randomOkStatusCode(getRandom());
try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
}
@@ -121,8 +120,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int j = 0; j < httpHosts.length; j++) {
String method = randomHttpMethod(getRandom());
int statusCode = randomErrorNoRetryStatusCode(getRandom());
try (Response response = restClient.performRequest(method, "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
try (Response response = restClient.performRequest(method, "/" + statusCode)) {
if (method.equals("HEAD") && statusCode == 404) {
//no exception gets thrown although we got a 404
assertThat(response.getStatusLine().getStatusCode(), equalTo(404));
@@ -149,7 +147,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
public void testRoundRobinRetryErrors() throws Exception {
String retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Set<HttpHost> hostsSet = new HashSet<>();
@@ -199,7 +197,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int j = 0; j < httpHosts.length; j++) {
retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Response response = e.getResponse();
@@ -225,8 +223,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int y = 0; y < iters; y++) {
int statusCode = randomErrorNoRetryStatusCode(getRandom());
Response response;
try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
Collections.<String, String>emptyMap(), null)) {
try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
response = esResponse;
}
catch(ResponseException e) {
@@ -245,8 +242,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
for (int y = 0; y < i + 1; y++) {
retryEndpoint = randomErrorRetryEndpoint();
try {
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint,
Collections.<String, String>emptyMap(), null);
restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
fail("request should have failed");
} catch(ResponseException e) {
Response response = e.getResponse();

@@ -19,7 +19,6 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
@@ -129,8 +128,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
httpHost = new HttpHost("localhost", 9200);
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders)
.setFailureListener(failureListener).build();
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
}

/**
@@ -156,7 +154,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
}

public void testSetNodes() throws IOException {
public void testSetHosts() throws IOException {
try {
restClient.setHosts((HttpHost[]) null);
fail("setHosts should have failed");
@@ -189,8 +187,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
public void testOkStatusCodes() throws Exception {
for (String method : getHttpMethods()) {
for (int okStatusCode : getOkStatusCodes()) {
Response response = restClient.performRequest(method, "/" + okStatusCode,
Collections.<String, String>emptyMap(), null);
Response response = performRequest(method, "/" + okStatusCode);
assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
}
}
@@ -204,8 +201,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
for (String method : getHttpMethods()) {
//error status codes should cause an exception to be thrown
for (int errorStatusCode : getAllErrorStatusCodes()) {
try (Response response = restClient.performRequest(method, "/" + errorStatusCode,
Collections.<String, String>emptyMap(), null)) {
try (Response response = performRequest(method, "/" + errorStatusCode)) {
if (method.equals("HEAD") && errorStatusCode == 404) {
//no exception gets thrown although we got a 404
assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));
@@ -231,14 +227,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
for (String method : getHttpMethods()) {
//IOExceptions should be let bubble up
try {
restClient.performRequest(method, "/coe", Collections.<String, String>emptyMap(), null);
performRequest(method, "/coe");
fail("request should have failed");
} catch(IOException e) {
assertThat(e, instanceOf(ConnectTimeoutException.class));
}
failureListener.assertCalled(httpHost);
try {
restClient.performRequest(method, "/soe", Collections.<String, String>emptyMap(), null);
performRequest(method, "/soe");
fail("request should have failed");
} catch(IOException e) {
assertThat(e, instanceOf(SocketTimeoutException.class));
@@ -275,8 +271,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
try {
restClient.performRequest(method, "/" + randomStatusCode(getRandom()),
Collections.<String, String>emptyMap(), entity);
restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.<String, String>emptyMap(), entity);
fail("request should have failed");
} catch(UnsupportedOperationException e) {
assertThat(e.getMessage(), equalTo(method + " with body is not supported"));
@@ -288,13 +283,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom());
try {
restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header[])null);
performRequest(method, "/" + statusCode, (Header[])null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("request headers must not be null", e.getMessage());
}
try {
restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header)null);
performRequest(method, "/" + statusCode, (Header)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("request header must not be null", e.getMessage());
@@ -305,7 +300,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
String method = randomHttpMethod(getRandom());
int statusCode = randomStatusCode(getRandom());
try {
restClient.performRequest(method, "/" + statusCode, null, null);
restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage());
}
try {
restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
fail("request should have failed");
} catch(NullPointerException e) {
assertEquals("params must not be null", e.getMessage());
@@ -352,7 +353,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
String uriAsString = "/" + randomStatusCode(getRandom());
URIBuilder uriBuilder = new URIBuilder(uriAsString);
Map<String, String> params = Collections.emptyMap();
if (getRandom().nextBoolean()) {
boolean hasParams = randomBoolean();
if (hasParams) {
int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
params = new HashMap<>(numParams);
for (int i = 0; i < numParams; i++) {
@@ -395,7 +397,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}

HttpEntity entity = null;
if (request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) {
boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
if (hasBody) {
entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
((HttpEntityEnclosingRequest) request).setEntity(entity);
}
@@ -418,10 +421,29 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}

try {
restClient.performRequest(method, uriAsString, params, entity, headers);
if (hasParams == false && hasBody == false && randomBoolean()) {
restClient.performRequest(method, uriAsString, headers);
} else if (hasBody == false && randomBoolean()) {
restClient.performRequest(method, uriAsString, params, headers);
} else {
restClient.performRequest(method, uriAsString, params, entity, headers);
}
} catch(ResponseException e) {
//all good
}
return request;
}

private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
switch(randomIntBetween(0, 2)) {
case 0:
return restClient.performRequest(method, endpoint, headers);
case 1:
return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
case 2:
return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
default:
throw new UnsupportedOperationException();
}
}
}

@@ -26,6 +26,8 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7

group = 'org.elasticsearch.client'

dependencies {
compile "org.elasticsearch.client:rest:${version}"
compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"

@@ -62,7 +62,7 @@ public class HostsSniffer {
* Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
*/
public List<HttpHost> sniffHosts() throws IOException {
try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) {
try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams)) {
return readHosts(response.getEntity());
}
}
@@ -156,7 +156,7 @@ public class HostsSniffer {

private final RestClient restClient;
private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
private Scheme scheme;
private Scheme scheme = Scheme.HTTP;

private Builder(RestClient restClient) {
Objects.requireNonNull(restClient, "restClient cannot be null");

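With the scheme now defaulting to HTTP, a sniffer is built like this (timeout and scheme are optional; the values are placeholders):

    HostsSniffer sniffer = HostsSniffer.builder(restClient)
            .setSniffRequestTimeoutMillis(30000)
            .setScheme(HostsSniffer.Scheme.HTTPS)
            .build();
    List<HttpHost> sniffedHosts = sniffer.sniffHosts();
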
@@ -89,7 +89,11 @@ public class HostsSnifferTests extends RestClientTestCase {
public void testSniffNodes() throws IOException, URISyntaxException {
HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
try (RestClient restClient = RestClient.builder(httpHost).build()) {
HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme);
HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout);
if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) {
builder.setScheme(scheme);
}
HostsSniffer sniffer = builder.build();
try {
List<HttpHost> sniffedHosts = sniffer.sniffHosts();
if (sniffResponse.isFailure) {

@@ -26,6 +26,9 @@ apply plugin: 'ru.vyarus.animalsniffer'
targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7

install.enabled = false
uploadArchives.enabled = false

dependencies {
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "junit:junit:${versions.junit}"

@@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'

group = 'org.elasticsearch.client'

dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
compile project(path: ':modules:transport-netty3', configuration: 'runtime')
compile project(path: ':modules:reindex', configuration: 'runtime')
compile project(path: ':modules:lang-mustache', configuration: 'runtime')
compile project(path: ':modules:percolator', configuration: 'runtime')
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
}

dependencyLicenses {
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false
}
}

forbiddenApisTest {
// we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
// be pulled in
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
}

namingConventions {
testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest'
//we don't have integration tests
skipIntegTestInDisguise = true
}

@@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;


/**
* A builder to create an instance of {@link TransportClient}.
* This class pre-installs the {@link Netty3Plugin}, {@link ReindexPlugin}, {@link PercolatorPlugin}, and {@link MustachePlugin}
* for the client. These plugins are all Elasticsearch core modules that the client requires.
*/
@SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient {
private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS = Collections.unmodifiableList(Arrays.asList(
TransportPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class));

@SafeVarargs
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
this(settings, Arrays.asList(plugins));
}

public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
}

/**
* The default transport implementation for the transport client.
*/
public static final class TransportPlugin extends Netty3Plugin {
// disable assertions for permissions since we might not have the permissions here
// compared to if we are loaded as a real module to the es server
public TransportPlugin(Settings settings) {
super(Settings.builder().put("netty.assert.buglevel", false).put(settings).build());
}
}
}

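A usage sketch: construct the pre-built client, then point it at a node with the existing TransportClient API (address and port are placeholders; the transport address class is the one used elsewhere in this codebase at this time):

    TransportClient client = new PreBuiltTransportClient(Settings.EMPTY);
    client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));
    // ... use the client ...
    client.close();
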
@@ -0,0 +1,60 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.Test;

import java.util.Arrays;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

public class PreBuiltTransportClientTests extends RandomizedTest {

@Test
public void testPluginInstalled() {
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
Settings settings = client.settings();
assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
}
}

@Test
public void testInstallPluginTwice() {

for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
MustachePlugin.class)) {
try {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");
} catch (IllegalArgumentException ex) {
assertEquals("plugin is already installed", ex.getMessage());
}
}
}
}

@@ -56,10 +56,10 @@ dependencies {
compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

compile 'org.elasticsearch:securesm:1.0'
compile 'org.elasticsearch:securesm:1.1'

// utilities
compile 'net.sf.jopt-simple:jopt-simple:4.9'
compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
compile 'com.carrotsearch:hppc:0.7.1'

// time handling, remove with java 8 time
@@ -74,8 +74,6 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"

// network stack
compile 'io.netty:netty:3.10.5.Final'
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// percentile ranks aggregation
@@ -152,26 +150,11 @@ processResources {
}

thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',

// classes are missing!

// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',

// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
'com.google.protobuf.CodedInputStream',

// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
'com.google.protobuf.CodedOutputStream',

// from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
'com.google.protobuf.ExtensionRegistry',
'com.google.protobuf.MessageLite$Builder',
'com.google.protobuf.MessageLite',
'com.google.protobuf.Parser',

// from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
'javax.jms.Message',
'javax.jms.MessageListener',
@@ -196,72 +179,8 @@ thirdPartyAudit.excludes = [
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',

// from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
'javax.servlet.ServletConfig',
'javax.servlet.ServletException',
'javax.servlet.ServletOutputStream',
'javax.servlet.http.HttpServlet',
'javax.servlet.http.HttpServletRequest',
'javax.servlet.http.HttpServletResponse',

// from org.jboss.netty.logging.CommonsLoggerFactory (netty)
'org.apache.commons.logging.Log',
'org.apache.commons.logging.LogFactory',

// from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
'org.apache.tomcat.jni.Buffer',
'org.apache.tomcat.jni.Library',
'org.apache.tomcat.jni.Pool',
'org.apache.tomcat.jni.SSL',
'org.apache.tomcat.jni.SSLContext',

// from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
'org.bouncycastle.asn1.x500.X500Name',
'org.bouncycastle.cert.X509v3CertificateBuilder',
'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
'org.bouncycastle.jce.provider.BouncyCastleProvider',
'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',

// from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
'org.eclipse.jetty.npn.NextProtoNego',

// from org.jboss.netty.logging.JBossLoggerFactory (netty)
'org.jboss.logging.Logger',

// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
'org.jboss.marshalling.ByteInput',

// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
'org.jboss.marshalling.ByteOutput',

// from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
'org.jboss.marshalling.Marshaller',

// from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
'org.jboss.marshalling.MarshallerFactory',
'org.jboss.marshalling.MarshallingConfiguration',
'org.jboss.marshalling.Unmarshaller',

// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',

// from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',

// from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
'org.osgi.framework.ServiceReference',
'org.osgi.service.log.LogService',
'org.osgi.util.tracker.ServiceTracker',
'org.osgi.util.tracker.ServiceTrackerCustomizer',

// from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
'org.slf4j.LoggerFactory',
]

// dependency licenses are currently checked in distribution

@@ -1110,7 +1110,7 @@ public long ramBytesUsed() {
this.analyzed.copyBytes(analyzed);
}

private final static class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
private static final class SurfaceFormAndPayload implements Comparable<SurfaceFormAndPayload> {
BytesRef payload;
long weight;


@@ -27,6 +27,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
@@ -71,6 +72,9 @@ public class CustomFieldQuery extends FieldQuery {
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
} else if (sourceQuery instanceof ToParentBlockJoinQuery) {
ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;
flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
} else {
super.flatten(sourceQuery, reader, flatQueries, boost);
}

@@ -26,7 +26,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
*/
public class StoreRateLimiting {

public static interface Provider {
public interface Provider {

StoreRateLimiting rateLimiting();
}

@@ -33,7 +33,6 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.TcpTransport;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
@@ -100,18 +99,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
}

public ElasticsearchException(StreamInput in) throws IOException {
super(in.readOptionalString(), in.readThrowable());
super(in.readOptionalString(), in.readException());
readStackTrace(this, in);
int numKeys = in.readVInt();
for (int i = 0; i < numKeys; i++) {
final String key = in.readString();
final int numValues = in.readVInt();
final ArrayList<String> values = new ArrayList<>(numValues);
for (int j = 0; j < numValues; j++) {
values.add(in.readString());
}
headers.put(key, values);
}
headers.putAll(in.readMapOfLists());
}

/**
@@ -162,7 +152,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
* Unwraps the actual cause from the exception for cases when the exception is a
* {@link ElasticsearchWrapperException}.
*
* @see org.elasticsearch.ExceptionsHelper#unwrapCause(Throwable)
* @see ExceptionsHelper#unwrapCause(Throwable)
*/
public Throwable unwrapCause() {
return ExceptionsHelper.unwrapCause(this);
@@ -204,16 +194,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(this.getMessage());
out.writeThrowable(this.getCause());
out.writeException(this.getCause());
writeStackTraces(this, out);
out.writeVInt(headers.size());
for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
out.writeString(entry.getKey());
out.writeVInt(entry.getValue().size());
for (String v : entry.getValue()) {
out.writeString(v);
}
}
out.writeMapOfLists(headers);
}

public static ElasticsearchException readException(StreamInput input, int id) throws IOException {
@@ -415,7 +398,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte

int numSuppressed = in.readVInt();
for (int i = 0; i < numSuppressed; i++) {
throwable.addSuppressed(in.readThrowable());
throwable.addSuppressed(in.readException());
}
return throwable;
}
@@ -435,7 +418,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
Throwable[] suppressed = throwable.getSuppressed();
out.writeVInt(suppressed.length);
for (Throwable t : suppressed) {
out.writeThrowable(t);
out.writeException(t);
}
return throwable;
}
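The header map now round-trips through the two bulk helpers instead of hand-rolled loops. A minimal sketch of the symmetry, assuming an in-memory BytesStreamOutput and that StreamInput.wrap(BytesReference) is available in this codebase at this point; both are assumptions, not confirmed by this diff:

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeMapOfLists(headers);                      // headers is a Map<String, List<String>>
    StreamInput in = StreamInput.wrap(out.bytes());    // assumption: see the note above
    Map<String, List<String>> headersCopy = in.readMapOfLists();
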
@@ -675,8 +658,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
org.elasticsearch.script.Script.ScriptParseException::new, 124),
// 124 used to be Script.ScriptParseException
HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
TcpTransport.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
@@ -709,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
ShardStateAction.NoLongerPrimaryShardException::new, 142),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),
NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144);


final Class<? extends ElasticsearchException> exceptionClass;
@@ -794,9 +777,9 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
return null;
}

public static void renderThrowable(XContentBuilder builder, Params params, Throwable t) throws IOException {
public static void renderException(XContentBuilder builder, Params params, Exception e) throws IOException {
builder.startObject("error");
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(t);
final ElasticsearchException[] rootCauses = ElasticsearchException.guessRootCauses(e);
builder.field("root_cause");
builder.startArray();
for (ElasticsearchException rootCause : rootCauses) {
@@ -806,7 +789,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
builder.endObject();
}
builder.endArray();
ElasticsearchException.toXContent(builder, params, t);
ElasticsearchException.toXContent(builder, params, e);
builder.endObject();
}

@@ -36,7 +36,7 @@ public class ElasticsearchSecurityException extends ElasticsearchException {
         this.status = status ;
     }
 
-    public ElasticsearchSecurityException(String msg, Throwable cause, Object... args) {
+    public ElasticsearchSecurityException(String msg, Exception cause, Object... args) {
         this(msg, ExceptionsHelper.status(cause), cause, args);
     }
 
@@ -37,25 +37,22 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-/**
- *
- */
 public final class ExceptionsHelper {
 
     private static final ESLogger logger = Loggers.getLogger(ExceptionsHelper.class);
 
-    public static RuntimeException convertToRuntime(Throwable t) {
-        if (t instanceof RuntimeException) {
-            return (RuntimeException) t;
+    public static RuntimeException convertToRuntime(Exception e) {
+        if (e instanceof RuntimeException) {
+            return (RuntimeException) e;
         }
-        return new ElasticsearchException(t);
+        return new ElasticsearchException(e);
     }
 
-    public static ElasticsearchException convertToElastic(Throwable t) {
-        if (t instanceof ElasticsearchException) {
-            return (ElasticsearchException) t;
+    public static ElasticsearchException convertToElastic(Exception e) {
+        if (e instanceof ElasticsearchException) {
+            return (ElasticsearchException) e;
         }
-        return new ElasticsearchException(t);
+        return new ElasticsearchException(e);
    }
 
     public static RestStatus status(Throwable t) {

@@ -164,8 +161,8 @@ public final class ExceptionsHelper {
     }
 
     public static IOException unwrapCorruption(Throwable t) {
-        return (IOException) unwrap(t, CorruptIndexException.class,
-            IndexFormatTooOldException.class,
+        return (IOException) unwrap(t, CorruptIndexException.class,
+            IndexFormatTooOldException.class,
             IndexFormatTooNewException.class);
     }
 

@@ -209,7 +206,6 @@ public final class ExceptionsHelper {
         return true;
     }
 
-
     /**
      * Deduplicate the failures by exception message and index.
     */
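The narrowing of convertToRuntime and convertToElastic from Throwable to Exception means fatal Errors can no longer be silently wrapped. A hedged sketch of the resulting call-site pattern (doWork is a hypothetical method, not from this diff):

    try {
        doWork();
    } catch (Exception e) {
        // Errors such as OutOfMemoryError now propagate instead of
        // being rethrown as an ElasticsearchException.
        throw ExceptionsHelper.convertToRuntime(e);
    }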
@@ -69,6 +69,8 @@ public class Version {
     public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_2_3_3_ID = 2030399;
     public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
+    public static final int V_2_3_4_ID = 2030499;
+    public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
     public static final int V_5_0_0_alpha1_ID = 5000001;
     public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha2_ID = 5000002;

@@ -77,7 +79,9 @@ public class Version {
     public static final Version V_5_0_0_alpha3 = new Version(V_5_0_0_alpha3_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
     public static final int V_5_0_0_alpha4_ID = 5000004;
     public static final Version V_5_0_0_alpha4 = new Version(V_5_0_0_alpha4_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
-    public static final Version CURRENT = V_5_0_0_alpha4;
+    public static final int V_5_0_0_alpha5_ID = 5000005;
+    public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
+    public static final Version CURRENT = V_5_0_0_alpha5;
 
     static {
         assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -90,6 +94,8 @@ public class Version {
 
     public static Version fromId(int id) {
         switch (id) {
+            case V_5_0_0_alpha5_ID:
+                return V_5_0_0_alpha5;
             case V_5_0_0_alpha4_ID:
                 return V_5_0_0_alpha4;
             case V_5_0_0_alpha3_ID:

@@ -98,6 +104,8 @@ public class Version {
                 return V_5_0_0_alpha2;
             case V_5_0_0_alpha1_ID:
                 return V_5_0_0_alpha1;
+            case V_2_3_4_ID:
+                return V_2_3_4;
             case V_2_3_3_ID:
                 return V_2_3_3;
             case V_2_3_2_ID:
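The numeric ids above appear to encode the version as two decimal digits per component. A reading of the scheme implied by the constants, not stated anywhere in this diff (release builds seem to use 99 while pre-releases count up from 01):

    // describeVersionId(2030499) -> "2.3.4"
    // describeVersionId(5000005) -> "5.0.0-pre5" (alpha5)
    static String describeVersionId(int id) {
        int major = id / 1_000_000;
        int minor = (id / 10_000) % 100;
        int revision = (id / 100) % 100;
        int build = id % 100;
        return major + "." + minor + "." + revision + (build == 99 ? "" : "-pre" + build);
    }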
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.action;
 
+import java.util.function.Consumer;
+
 /**
  * A listener for action responses or failures.
  */

@@ -32,5 +34,32 @@ public interface ActionListener<Response> {
     /**
      * A failure caused by an exception at some phase of the task.
     */
-    void onFailure(Throwable e);
+    void onFailure(Exception e);
+
+    /**
+     * Creates a listener that listens for a response (or failure) and executes the
+     * corresponding consumer when the response (or failure) is received.
+     *
+     * @param onResponse the consumer of the response, when the listener receives one
+     * @param onFailure the consumer of the failure, when the listener receives one
+     * @param <Response> the type of the response
+     * @return a listener that listens for responses and invokes the consumer when received
+     */
+    static <Response> ActionListener<Response> wrap(Consumer<Response> onResponse, Consumer<Exception> onFailure) {
+        return new ActionListener<Response>() {
+            @Override
+            public void onResponse(Response response) {
+                try {
+                    onResponse.accept(response);
+                } catch (Exception e) {
+                    onFailure(e);
+                }
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                onFailure.accept(e);
+            }
+        };
+    }
 }
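The new wrap factory replaces boilerplate anonymous listeners. An illustrative usage sketch (client, logger, and the request are assumed to exist; only the wrap(...) signature comes from the hunk above). Note that an exception thrown by the response consumer is routed to the failure consumer:

    ActionListener<SearchResponse> listener = ActionListener.wrap(
            response -> logger.info("search took {}", response.getTook()),
            e -> logger.warn("search failed", e));
    client.search(new SearchRequest("my-index"), listener);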
@@ -20,7 +20,7 @@
 package org.elasticsearch.action;
 
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportResponse;
 

@@ -31,7 +31,7 @@ import java.util.function.Supplier;
  * A simple base class for action response listeners, defaulting to using the SAME executor (as its
  * very common on response handlers).
 */
-public class ActionListenerResponseHandler<Response extends TransportResponse> extends BaseTransportResponseHandler<Response> {
+public class ActionListenerResponseHandler<Response extends TransportResponse> implements TransportResponseHandler<Response> {
 
     private final ActionListener<Response> listener;
     private final Supplier<Response> responseSupplier;
@@ -19,6 +19,14 @@
 
 package org.elasticsearch.action;
 
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
 import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
 import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;

@@ -303,12 +311,6 @@ import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
 import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
 import org.elasticsearch.rest.action.update.RestUpdateAction;
 
-import java.util.ArrayList;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-
 import static java.util.Collections.unmodifiableList;
 import static java.util.Collections.unmodifiableMap;
 

@@ -335,7 +337,12 @@ public class ActionModule extends AbstractModule {
         actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
         autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
         destructiveOperations = new DestructiveOperations(settings, clusterSettings);
-        restController = new RestController(settings);
+        Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
+        restController = new RestController(settings, headers);
     }
 
+    public Map<String, ActionHandler<?, ?>> getActions() {
+        return actions;
+    }
+
     static Map<String, ActionHandler<?, ?>> setupActions(List<ActionPlugin> actionPlugins) {

@@ -621,14 +628,6 @@ public class ActionModule extends AbstractModule {
         bind(ActionFilters.class).asEagerSingleton();
         bind(DestructiveOperations.class).toInstance(destructiveOperations);
 
-        // register Name -> GenericAction Map that can be injected to instances.
-        @SuppressWarnings("rawtypes")
-        MapBinder<String, GenericAction> actionsBinder
-            = MapBinder.newMapBinder(binder(), String.class, GenericAction.class);
-
-        for (Map.Entry<String, ActionHandler<?, ?>> entry : actions.entrySet()) {
-            actionsBinder.addBinding(entry.getKey()).toInstance(entry.getValue().getAction());
-        }
         if (false == transportClient) {
             // Supporting classes only used when not a transport client
             bind(AutoCreateIndex.class).toInstance(autoCreateIndex);
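The RestController now receives the union of headers declared by action plugins. A sketch of the plugin side, assuming ActionPlugin exposes getRestHeaders() as the stream call above implies; the plugin class and header name are made up for illustration:

    import java.util.Collection;
    import java.util.Collections;
    import org.elasticsearch.plugins.ActionPlugin;
    import org.elasticsearch.plugins.Plugin;

    public class MyAuthPlugin extends Plugin implements ActionPlugin {
        @Override
        public Collection<String> getRestHeaders() {
            // header names returned here would be copied through REST requests
            return Collections.singleton("X-My-Auth-Token");
        }
    }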
@@ -22,11 +22,11 @@ package org.elasticsearch.action;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 
 /**
- * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Throwable)} in case an uncaught
+ * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
  * exception or error is thrown while the actual action is run.
 */
 public abstract class ActionRunnable<Response> extends AbstractRunnable {
 
     protected final ActionListener<Response> listener;
 
     public ActionRunnable(ActionListener<Response> listener) {

@@ -34,11 +34,11 @@ public abstract class ActionRunnable<Response> extends AbstractRunnable {
     }
 
     /**
-     * Calls the action listeners {@link ActionListener#onFailure(Throwable)} method with the given exception.
+     * Calls the action listeners {@link ActionListener#onFailure(Exception)} method with the given exception.
      * This method is invoked for all exception thrown by {@link #doRun()}
     */
     @Override
-    public void onFailure(Throwable t) {
-        listener.onFailure(t);
+    public void onFailure(Exception e) {
+        listener.onFailure(e);
     }
 }
@@ -45,7 +45,7 @@ public class LatchedActionListener<T> implements ActionListener<T> {
     }
 
     @Override
-    public void onFailure(Throwable e) {
+    public void onFailure(Exception e) {
         try {
             delegate.onFailure(e);
         } finally {
@@ -43,15 +43,15 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
 
     private final long taskId;
 
-    private final Throwable reason;
+    private final Exception reason;
 
     private final RestStatus status;
 
-    public TaskOperationFailure(String nodeId, long taskId, Throwable t) {
+    public TaskOperationFailure(String nodeId, long taskId, Exception e) {
         this.nodeId = nodeId;
         this.taskId = taskId;
-        this.reason = t;
-        status = ExceptionsHelper.status(t);
+        this.reason = e;
+        status = ExceptionsHelper.status(e);
     }
 
     /**

@@ -60,7 +60,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
     public TaskOperationFailure(StreamInput in) throws IOException {
         nodeId = in.readString();
         taskId = in.readLong();
-        reason = in.readThrowable();
+        reason = in.readException();
         status = RestStatus.readFrom(in);
     }
 

@@ -68,7 +68,7 @@ public final class TaskOperationFailure implements Writeable, ToXContent {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(nodeId);
         out.writeLong(taskId);
-        out.writeThrowable(reason);
+        out.writeException(reason);
         RestStatus.writeTo(out, status);
     }
 
@@ -35,7 +35,6 @@ public class TransportActionNodeProxy<Request extends ActionRequest, Response ex
     private final GenericAction<Request, Response> action;
     private final TransportRequestOptions transportOptions;
 
-    @Inject
     public TransportActionNodeProxy(Settings settings, GenericAction<Request, Response> action, TransportService transportService) {
         super(settings);
         this.action = action;
@@ -52,6 +52,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
     private Integer shard;
     private Boolean primary;
     private boolean includeYesDecisions = false;
+    private boolean includeDiskInfo = false;
 
     /** Explain the first unassigned shard */
     public ClusterAllocationExplainRequest() {

@@ -134,6 +135,16 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
         return this.includeYesDecisions;
     }
 
+    /** {@code true} to include information about the gathered disk information of nodes in the cluster */
+    public void includeDiskInfo(boolean includeDiskInfo) {
+        this.includeDiskInfo = includeDiskInfo;
+    }
+
+    /** Returns true if information about disk usage and shard sizes should also be returned */
+    public boolean includeDiskInfo() {
+        return this.includeDiskInfo;
+    }
+
     @Override
     public String toString() {
         StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");

@@ -164,6 +175,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
         this.shard = in.readOptionalVInt();
         this.primary = in.readOptionalBoolean();
         this.includeYesDecisions = in.readBoolean();
+        this.includeDiskInfo = in.readBoolean();
     }
 
     @Override

@@ -173,5 +185,6 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
         out.writeOptionalVInt(shard);
         out.writeOptionalBoolean(primary);
         out.writeBoolean(includeYesDecisions);
+        out.writeBoolean(includeDiskInfo);
     }
 }
@@ -53,6 +53,18 @@ public class ClusterAllocationExplainRequestBuilder
         return this;
     }
 
+    /** Whether to include "YES" decider decisions in the response instead of only "NO" decisions */
+    public ClusterAllocationExplainRequestBuilder setIncludeYesDecisions(boolean includeYesDecisions) {
+        request.includeYesDecisions(includeYesDecisions);
+        return this;
+    }
+
+    /** Whether to include information about the gathered disk information of nodes in the cluster */
+    public ClusterAllocationExplainRequestBuilder setIncludeDiskInfo(boolean includeDiskInfo) {
+        request.includeDiskInfo(includeDiskInfo);
+        return this;
+    }
+
     /**
     * Signal that the first unassigned shard should be used
     */
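A hedged usage sketch of the new flag via the builder; apart from setIncludeDiskInfo(true), which this hunk introduces, the client variable and the other setters are assumed API from the same builder family, not shown here:

    ClusterAllocationExplainResponse resp = client.admin().cluster()
            .prepareAllocationExplain()
            .setIndex("my-index").setShard(0).setPrimary(true)
            .setIncludeDiskInfo(true)   // ask for the cluster disk info in the explanation
            .get();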
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.action.admin.cluster.allocation;
 
+import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.Nullable;

@@ -48,10 +49,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
     private final long allocationDelayMillis;
     private final long remainingDelayMillis;
     private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
+    private final ClusterInfo clusterInfo;
 
     public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
                                         long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
-                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
+                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
         this.shard = shard;
         this.primary = primary;
         this.hasPendingAsyncFetch = hasPendingAsyncFetch;

@@ -60,6 +62,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         this.allocationDelayMillis = allocationDelayMillis;
         this.remainingDelayMillis = remainingDelayMillis;
         this.nodeExplanations = nodeExplanations;
+        this.clusterInfo = clusterInfo;
     }
 
     public ClusterAllocationExplanation(StreamInput in) throws IOException {

@@ -78,6 +81,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
             nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
         }
         this.nodeExplanations = nodeToExplanation;
+        if (in.readBoolean()) {
+            this.clusterInfo = new ClusterInfo(in);
+        } else {
+            this.clusterInfo = null;
+        }
     }
 
     @Override

@@ -94,6 +102,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         for (NodeExplanation explanation : this.nodeExplanations.values()) {
             explanation.writeTo(out);
         }
+        if (this.clusterInfo != null) {
+            out.writeBoolean(true);
+            this.clusterInfo.writeTo(out);
+        } else {
+            out.writeBoolean(false);
+        }
     }
 
     /** Return the shard that the explanation is about */

@@ -143,6 +157,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
         return this.nodeExplanations;
     }
 
+    /** Return the cluster disk info for the cluster or null if none available */
+    @Nullable
+    public ClusterInfo getClusterInfo() {
+        return this.clusterInfo;
+    }
+
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(); {
             builder.startObject("shard"); {

@@ -164,11 +184,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
                 builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
                 builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
             }
-            builder.startObject("nodes");
-            for (NodeExplanation explanation : nodeExplanations.values()) {
-                explanation.toXContent(builder, params);
+            builder.startObject("nodes"); {
+                for (NodeExplanation explanation : nodeExplanations.values()) {
+                    explanation.toXContent(builder, params);
+                }
             }
             builder.endObject(); // end nodes
+            if (this.clusterInfo != null) {
+                builder.startObject("cluster_info"); {
+                    this.clusterInfo.toXContent(builder, params);
+                }
+                builder.endObject(); // end "cluster_info"
+            }
         }
         builder.endObject(); // end wrapping object
         return builder;
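The boolean-prefixed read/write of the nullable ClusterInfo above is the expanded form of the optional-writeable helpers that this same commit uses in AnalyzeRequest. Whether ClusterInfo could simply use those helpers here is an assumption; the equivalent would be:

    out.writeOptionalWriteable(clusterInfo);                       // write side
    this.clusterInfo = in.readOptionalWriteable(ClusterInfo::new); // read side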
@@ -43,7 +43,7 @@ public class NodeExplanation implements Writeable, ToXContent {
     private final String finalExplanation;
 
     public NodeExplanation(final DiscoveryNode node, final Decision nodeDecision, final Float nodeWeight,
-                           final @Nullable IndicesShardStoresResponse.StoreStatus storeStatus,
+                           @Nullable final IndicesShardStoresResponse.StoreStatus storeStatus,
                            final ClusterAllocationExplanation.FinalDecision finalDecision,
                            final String finalExplanation,
                            final ClusterAllocationExplanation.StoreCopy storeCopy) {
@@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
+import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;

@@ -145,7 +146,7 @@ public class TransportClusterAllocationExplainAction
             // No copies of the data
             storeCopy = ClusterAllocationExplanation.StoreCopy.NONE;
         } else {
-            final Throwable storeErr = storeStatus.getStoreException();
+            final Exception storeErr = storeStatus.getStoreException();
             if (storeErr != null) {
                 if (ExceptionsHelper.unwrapCause(storeErr) instanceof CorruptIndexException) {
                     storeCopy = ClusterAllocationExplanation.StoreCopy.CORRUPT;

@@ -219,7 +220,7 @@ public class TransportClusterAllocationExplainAction
     public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
                                                             boolean includeYesDecisions, ShardsAllocator shardAllocator,
                                                             List<IndicesShardStoresResponse.StoreStatus> shardStores,
-                                                            GatewayAllocator gatewayAllocator) {
+                                                            GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
         // don't short circuit deciders, we want a full explanation
         allocation.debugDecision(true);
         // get the existing unassigned info if available

@@ -262,16 +263,17 @@ public class TransportClusterAllocationExplainAction
             explanations.put(node, nodeExplanation);
         }
         return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
-            shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
-            gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
+            shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
+            gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
     }
 
     @Override
     protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                    final ActionListener<ClusterAllocationExplainResponse> listener) {
         final RoutingNodes routingNodes = state.getRoutingNodes();
+        final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
         final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
-            clusterInfoService.getClusterInfo(), System.nanoTime(), false);
+            clusterInfo, System.nanoTime(), false);
 
         ShardRouting foundShard = null;
         if (request.useAnyUnassignedShard()) {

@@ -318,12 +320,13 @@ public class TransportClusterAllocationExplainAction
                     shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
                 List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
                 ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
-                    request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
+                    request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
+                    request.includeDiskInfo() ? clusterInfo : null);
                 listener.onResponse(new ClusterAllocationExplainResponse(cae));
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 listener.onFailure(e);
             }
         });
@@ -104,9 +104,9 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
             }
 
             @Override
-            public void onFailure(String source, Throwable t) {
-                logger.error("unexpected failure during [{}]", t, source);
-                listener.onFailure(t);
+            public void onFailure(String source, Exception e) {
+                logger.error("unexpected failure during [{}]", e, source);
+                listener.onFailure(e);
             }
 
             @Override
@@ -46,8 +46,6 @@ import static java.util.Collections.unmodifiableMap;
  * Node information (static, does not change over time).
 */
 public class NodeInfo extends BaseNodeResponse {
-    @Nullable
-    private Map<String, String> serviceAttributes;
 
     private Version version;
     private Build build;

@@ -85,14 +83,13 @@ public class NodeInfo extends BaseNodeResponse {
     public NodeInfo() {
     }
 
-    public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Map<String, String> serviceAttributes, @Nullable Settings settings,
+    public NodeInfo(Version version, Build build, DiscoveryNode node, @Nullable Settings settings,
                     @Nullable OsInfo os, @Nullable ProcessInfo process, @Nullable JvmInfo jvm, @Nullable ThreadPoolInfo threadPool,
                     @Nullable TransportInfo transport, @Nullable HttpInfo http, @Nullable PluginsAndModules plugins, @Nullable IngestInfo ingest,
                     @Nullable ByteSizeValue totalIndexingBuffer) {
         super(node);
         this.version = version;
         this.build = build;
-        this.serviceAttributes = serviceAttributes;
         this.settings = settings;
         this.os = os;
         this.process = process;

@@ -127,14 +124,6 @@ public class NodeInfo extends BaseNodeResponse {
         return this.build;
     }
 
-    /**
-     * The service attributes of the node.
-     */
-    @Nullable
-    public Map<String, String> getServiceAttributes() {
-        return this.serviceAttributes;
-    }
-
     /**
      * The settings of the node.
     */

@@ -213,14 +202,6 @@ public class NodeInfo extends BaseNodeResponse {
         } else {
             totalIndexingBuffer = null;
         }
-        if (in.readBoolean()) {
-            Map<String, String> builder = new HashMap<>();
-            int size = in.readVInt();
-            for (int i = 0; i < size; i++) {
-                builder.put(in.readString(), in.readString());
-            }
-            serviceAttributes = unmodifiableMap(builder);
-        }
         if (in.readBoolean()) {
             settings = Settings.readSettingsFromStream(in);
         }

@@ -262,16 +243,6 @@ public class NodeInfo extends BaseNodeResponse {
             out.writeBoolean(true);
             out.writeLong(totalIndexingBuffer.bytes());
         }
-        if (getServiceAttributes() == null) {
-            out.writeBoolean(false);
-        } else {
-            out.writeBoolean(true);
-            out.writeVInt(serviceAttributes.size());
-            for (Map.Entry<String, String> entry : serviceAttributes.entrySet()) {
-                out.writeString(entry.getKey());
-                out.writeString(entry.getValue());
-            }
-        }
         if (settings == null) {
             out.writeBoolean(false);
         } else {
@@ -73,12 +73,6 @@ public class NodesInfoResponse extends BaseNodesResponse<NodeInfo> implements To
             builder.byteSizeField("total_indexing_buffer", "total_indexing_buffer_in_bytes", nodeInfo.getTotalIndexingBuffer());
         }
 
-        if (nodeInfo.getServiceAttributes() != null) {
-            for (Map.Entry<String, String> nodeAttribute : nodeInfo.getServiceAttributes().entrySet()) {
-                builder.field(nodeAttribute.getKey(), nodeAttribute.getValue());
-            }
-        }
-
         builder.startArray("roles");
         for (DiscoveryNode.Role role : nodeInfo.getNode().getRoles()) {
             builder.value(role.getRoleName());
@@ -44,9 +44,9 @@ import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskInfo;
 import org.elasticsearch.tasks.TaskPersistenceService;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.BaseTransportResponseHandler;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;

@@ -111,7 +111,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
             GetTaskRequest nodeRequest = request.nodeRequest(clusterService.localNode().getId(), thisTask.getId());
             taskManager.registerChildTask(thisTask, node.getId());
             transportService.sendRequest(node, GetTaskAction.NAME, nodeRequest, builder.build(),
-                    new BaseTransportResponseHandler<GetTaskResponse>() {
+                    new TransportResponseHandler<GetTaskResponse>() {
                         @Override
                         public GetTaskResponse newInstance() {
                             return new GetTaskResponse();

@@ -154,8 +154,8 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
                         }
 
                         @Override
-                        public void onFailure(Throwable t) {
-                            listener.onFailure(t);
+                        public void onFailure(Exception e) {
+                            listener.onFailure(e);
                         }
                     });
         } else {

@@ -179,7 +179,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 /*
                  * We couldn't load the task from the task index. Instead of 404 we should use the snapshot we took after it finished. If
                  * the error isn't a 404 then we'll just throw it back to the user.

@@ -207,13 +207,13 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
             public void onResponse(GetResponse getResponse) {
                 try {
                     onGetFinishedTaskFromIndex(getResponse, listener);
-                } catch (Throwable e) {
+                } catch (Exception e) {
                     listener.onFailure(e);
                 }
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
                     // We haven't yet created the index for the task results so it can't be found.
                     listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or persisted", e, request.getTaskId()));
@@ -128,6 +128,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
     * {@code group_by=nodes}.
     */
     public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
+        //WTF is this? Why isn't this set by default;
         this.discoveryNodes = discoveryNodes;
     }
 
@@ -76,7 +76,7 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction<D
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 listener.onFailure(e);
             }
         });

@@ -80,7 +80,7 @@ public class TransportPutRepositoryAction extends TransportMasterNodeAction<PutR
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 listener.onFailure(e);
             }
         });

@@ -78,7 +78,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction<V
             }
 
             @Override
-            public void onFailure(Throwable e) {
+            public void onFailure(Exception e) {
                 listener.onFailure(e);
             }
         });
@@ -102,9 +102,9 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction<Clu
             }
 
             @Override
-            public void onFailure(String source, Throwable t) {
-                logger.debug("failed to perform [{}]", t, source);
-                super.onFailure(source, t);
+            public void onFailure(String source, Exception e) {
+                logger.debug("failed to perform [{}]", e, source);
+                super.onFailure(source, e);
             }
 
             @Override
@@ -93,11 +93,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             }
 
             @Override
-            public void onAllNodesAcked(@Nullable Throwable t) {
+            public void onAllNodesAcked(@Nullable Exception e) {
                 if (changed) {
                     reroute(true);
                 } else {
-                    super.onAllNodesAcked(t);
+                    super.onAllNodesAcked(e);
                 }
             }
 

@@ -146,10 +146,10 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             }
 
             @Override
-            public void onFailure(String source, Throwable t) {
+            public void onFailure(String source, Exception e) {
                 //if the reroute fails we only log
-                logger.debug("failed to perform [{}]", t, source);
-                listener.onFailure(new ElasticsearchException("reroute after update settings failed", t));
+                logger.debug("failed to perform [{}]", e, source);
+                listener.onFailure(new ElasticsearchException("reroute after update settings failed", e));
             }
 
             @Override

@@ -165,9 +165,9 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct
             }
 
             @Override
-            public void onFailure(String source, Throwable t) {
-                logger.debug("failed to perform [{}]", t, source);
-                super.onFailure(source, t);
+            public void onFailure(String source, Exception e) {
+                logger.debug("failed to perform [{}]", e, source);
+                super.onFailure(source, e);
             }
 
             @Override
@@ -94,10 +94,10 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
             }
 
             @Override
-            public void onSnapshotFailure(Snapshot snapshot, Throwable t) {
+            public void onSnapshotFailure(Snapshot snapshot, Exception e) {
                 if (snapshot.getRepository().equals(request.repository()) &&
                     snapshot.getSnapshotId().getName().equals(request.snapshot())) {
-                    listener.onFailure(t);
+                    listener.onFailure(e);
                     snapshotsService.removeListener(this);
                 }
             }

@@ -108,8 +108,8 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
             }
 
             @Override
-            public void onFailure(Throwable t) {
-                listener.onFailure(t);
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
             }
         });
     }
@@ -72,8 +72,8 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction<Del
             }
 
             @Override
-            public void onFailure(Throwable t) {
-                listener.onFailure(t);
+            public void onFailure(Exception e) {
+                listener.onFailure(e);
             }
         });
     }
@@ -120,8 +120,8 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction<GetSn
                 snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
             }
             listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
-        } catch (Throwable t) {
-            listener.onFailure(t);
+        } catch (Exception e) {
+            listener.onFailure(e);
         }
     }
 
@@ -94,7 +94,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
                 }
 
                 @Override
-                public void onFailure(Throwable e) {
+                public void onFailure(Exception e) {
                     listener.onFailure(e);
                 }
             });

@@ -104,7 +104,7 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction<Re
             }
 
             @Override
-            public void onFailure(Throwable t) {
+            public void onFailure(Exception t) {
                 listener.onFailure(t);
             }
         });
@@ -125,13 +125,13 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                 List<SnapshotsInProgress.Entry> currentSnapshots =
                     snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
                 listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses));
-            } catch (Throwable e) {
+            } catch (Exception e) {
                 listener.onFailure(e);
             }
         }
 
         @Override
-        public void onFailure(Throwable e) {
+        public void onFailure(Exception e) {
             listener.onFailure(e);
         }
     });

@@ -207,15 +207,14 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction<Sn
                 .filter(s -> requestedSnapshotNames.contains(s.getName()))
                 .collect(Collectors.toMap(SnapshotId::getName, Function.identity()));
         for (final String snapshotName : request.snapshots()) {
+            if (currentSnapshotNames.contains(snapshotName)) {
+                // we've already found this snapshot in the current snapshot entries, so skip over
+                continue;
+            }
             SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
             if (snapshotId == null) {
-                if (currentSnapshotNames.contains(snapshotName)) {
-                    // we've already found this snapshot in the current snapshot entries, so skip over
-                    continue;
-                } else {
-                    // neither in the current snapshot entries nor found in the repository
-                    throw new SnapshotMissingException(repositoryName, snapshotName);
-                }
+                // neither in the current snapshot entries nor found in the repository
+                throw new SnapshotMissingException(repositoryName, snapshotName);
             }
             SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
             List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
@@ -119,7 +119,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
             }
 
             @Override
-            public void onFailure(Throwable t) {
+            public void onFailure(Exception t) {
                 logger.debug("failed to perform aliases", t);
                 listener.onFailure(t);
             }
@@ -18,14 +18,22 @@
 */
 package org.elasticsearch.action.admin.indices.analyze;
 
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.single.shard.SingleShardRequest;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
 
 import static org.elasticsearch.action.ValidateActions.addValidationError;
 

@@ -39,11 +47,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
 
     private String analyzer;
 
-    private String tokenizer;
+    private NameOrDefinition tokenizer;
 
-    private String[] tokenFilters = Strings.EMPTY_ARRAY;
+    private final List<NameOrDefinition> tokenFilters = new ArrayList<>();
 
-    private String[] charFilters = Strings.EMPTY_ARRAY;
+    private final List<NameOrDefinition> charFilters = new ArrayList<>();
 
     private String field;
 

@@ -51,6 +59,48 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
 
     private String[] attributes = Strings.EMPTY_ARRAY;
 
+    public static class NameOrDefinition implements Writeable {
+        // exactly one of these two members is not null
+        public final String name;
+        public final Settings definition;
+
+        NameOrDefinition(String name) {
+            this.name = Objects.requireNonNull(name);
+            this.definition = null;
+        }
+
+        NameOrDefinition(Map<String, ?> definition) {
+            this.name = null;
+            Objects.requireNonNull(definition);
+            try {
+                XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
+                builder.map(definition);
+                this.definition = Settings.builder().loadFromSource(builder.string()).build();
+            } catch (IOException e) {
+                throw new IllegalArgumentException("Failed to parse [" + definition + "]", e);
+            }
+        }
+
+        NameOrDefinition(StreamInput in) throws IOException {
+            name = in.readOptionalString();
+            if (in.readBoolean()) {
+                definition = Settings.readSettingsFromStream(in);
+            } else {
+                definition = null;
+            }
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeOptionalString(name);
+            boolean isNotNullDefinition = this.definition != null;
+            out.writeBoolean(isNotNullDefinition);
+            if (isNotNullDefinition) {
+                Settings.writeSettingsToStream(definition, out);
+            }
+        }
+    }
+
     public AnalyzeRequest() {
     }
 

@@ -82,35 +132,43 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
     }
 
     public AnalyzeRequest tokenizer(String tokenizer) {
-        this.tokenizer = tokenizer;
+        this.tokenizer = new NameOrDefinition(tokenizer);
         return this;
     }
 
-    public String tokenizer() {
+    public AnalyzeRequest tokenizer(Map<String, ?> tokenizer) {
+        this.tokenizer = new NameOrDefinition(tokenizer);
+        return this;
+    }
+
+    public NameOrDefinition tokenizer() {
         return this.tokenizer;
     }
 
-    public AnalyzeRequest tokenFilters(String... tokenFilters) {
-        if (tokenFilters == null) {
-            throw new IllegalArgumentException("token filters must not be null");
-        }
-        this.tokenFilters = tokenFilters;
+    public AnalyzeRequest addTokenFilter(String tokenFilter) {
+        this.tokenFilters.add(new NameOrDefinition(tokenFilter));
        return this;
    }
 
-    public String[] tokenFilters() {
+    public AnalyzeRequest addTokenFilter(Map<String, ?> tokenFilter) {
+        this.tokenFilters.add(new NameOrDefinition(tokenFilter));
+        return this;
+    }
+
+    public List<NameOrDefinition> tokenFilters() {
         return this.tokenFilters;
     }
 
-    public AnalyzeRequest charFilters(String... charFilters) {
-        if (charFilters == null) {
-            throw new IllegalArgumentException("char filters must not be null");
-        }
-        this.charFilters = charFilters;
+    public AnalyzeRequest addCharFilter(Map<String, ?> charFilter) {
+        this.charFilters.add(new NameOrDefinition(charFilter));
        return this;
    }
 
-    public String[] charFilters() {
+    public AnalyzeRequest addCharFilter(String charFilter) {
+        this.charFilters.add(new NameOrDefinition(charFilter));
+        return this;
+    }
+
+    public List<NameOrDefinition> charFilters() {
         return this.charFilters;
     }
 

@@ -158,14 +216,12 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
         super.readFrom(in);
         text = in.readStringArray();
         analyzer = in.readOptionalString();
-        tokenizer = in.readOptionalString();
-        tokenFilters = in.readStringArray();
-        charFilters = in.readStringArray();
+        tokenizer = in.readOptionalWriteable(NameOrDefinition::new);
+        tokenFilters.addAll(in.readList(NameOrDefinition::new));
+        charFilters.addAll(in.readList(NameOrDefinition::new));
         field = in.readOptionalString();
-        if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
-            explain = in.readBoolean();
-            attributes = in.readStringArray();
-        }
+        explain = in.readBoolean();
+        attributes = in.readStringArray();
     }
 
     @Override

@@ -173,13 +229,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
         super.writeTo(out);
         out.writeStringArray(text);
         out.writeOptionalString(analyzer);
-        out.writeOptionalString(tokenizer);
-        out.writeStringArray(tokenFilters);
-        out.writeStringArray(charFilters);
+        out.writeOptionalWriteable(tokenizer);
+        out.writeList(tokenFilters);
+        out.writeList(charFilters);
         out.writeOptionalString(field);
-        if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
-            out.writeBoolean(explain);
-            out.writeStringArray(attributes);
-        }
+        out.writeBoolean(explain);
+        out.writeStringArray(attributes);
     }
 }
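With NameOrDefinition, analyze requests can now carry inline filter definitions as maps rather than only registered names. A hedged sketch of the new request-side API (the tokenizer/addCharFilter/addTokenFilter calls come from the diff above; the constructor, the text(...) setter, and the concrete filter settings are assumptions for illustration):

    Map<String, Object> charFilter = new HashMap<>();
    charFilter.put("type", "mapping");                         // inline, anonymous definition
    charFilter.put("mappings", Arrays.asList("ph => f", "qu => q"));

    AnalyzeRequest request = new AnalyzeRequest("my-index")
            .text("quick fox")
            .tokenizer("standard")       // by registered name
            .addCharFilter(charFilter)   // by anonymous definition
            .addTokenFilter("lowercase");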
@@ -21,6 +21,8 @@ package org.elasticsearch.action.admin.indices.analyze;
 import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
 
+import java.util.Map;
+
 /**
 *
 */

@@ -54,7 +56,7 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
     }
 
     /**
-     * Instead of setting the analyzer, sets the tokenizer that will be used as part of a custom
+     * Instead of setting the analyzer, sets the tokenizer as name that will be used as part of a custom
     * analyzer.
     */
     public AnalyzeRequestBuilder setTokenizer(String tokenizer) {

@@ -63,18 +65,43 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
     }
 
     /**
-     * Sets token filters that will be used on top of a tokenizer provided.
+     * Instead of setting the analyzer, sets the tokenizer using custom settings that will be used as part of a custom
+     * analyzer.
     */
-    public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
-        request.tokenFilters(tokenFilters);
+    public AnalyzeRequestBuilder setTokenizer(Map<String, ?> tokenizer) {
+        request.tokenizer(tokenizer);
         return this;
     }
 
     /**
-     * Sets char filters that will be used before the tokenizer.
+     * Add token filter setting that will be used on top of a tokenizer provided.
     */
-    public AnalyzeRequestBuilder setCharFilters(String... charFilters) {
-        request.charFilters(charFilters);
+    public AnalyzeRequestBuilder addTokenFilter(Map<String, ?> tokenFilter) {
+        request.addTokenFilter(tokenFilter);
        return this;
    }
 
+    /**
+     * Add a name of token filter that will be used on top of a tokenizer provided.
+     */
+    public AnalyzeRequestBuilder addTokenFilter(String tokenFilter) {
+        request.addTokenFilter(tokenFilter);
+        return this;
+    }
+
+    /**
+     * Add char filter setting that will be used on top of a tokenizer provided.
+     */
+    public AnalyzeRequestBuilder addCharFilter(Map<String, ?> charFilter) {
+        request.addCharFilter(charFilter);
+        return this;
+    }
+
+    /**
+     * Add a name of char filter that will be used before the tokenizer.
+     */
+    public AnalyzeRequestBuilder addCharFilter(String tokenFilter) {
+        request.addCharFilter(tokenFilter);
+        return this;
+    }
 
@@ -25,23 +25,25 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
 import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
 import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
-import org.apache.lucene.util.Attribute;
-import org.apache.lucene.util.AttributeReflector;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.FastStringReader;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.analysis.AnalysisRegistry;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.analysis.CharFilterFactory;

@@ -167,65 +169,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
             }
 
         } else if (request.tokenizer() != null) {
-            TokenizerFactory tokenizerFactory;
-            if (analysisService == null) {
-                AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(request.tokenizer());
-                if (tokenizerFactoryFactory == null) {
-                    throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
-                }
-                tokenizerFactory = tokenizerFactoryFactory.get(environment, request.tokenizer());
-            } else {
-                tokenizerFactory = analysisService.tokenizer(request.tokenizer());
-                if (tokenizerFactory == null) {
-                    throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
-                }
-            }
+            TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);
 
             TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
-            if (request.tokenFilters() != null && request.tokenFilters().length > 0) {
-                tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
-                for (int i = 0; i < request.tokenFilters().length; i++) {
-                    String tokenFilterName = request.tokenFilters()[i];
-                    if (analysisService == null) {
-                        AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilterName);
-                        if (tokenFilterFactoryFactory == null) {
-                            throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
-                        }
-                        tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilterName);
-                    } else {
-                        tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilterName);
-                        if (tokenFilterFactories[i] == null) {
-                            throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
-                        }
-                    }
-                    if (tokenFilterFactories[i] == null) {
-                        throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
-                    }
-                }
-            }
+            tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);
 
             CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
-            if (request.charFilters() != null && request.charFilters().length > 0) {
-                charFilterFactories = new CharFilterFactory[request.charFilters().length];
-                for (int i = 0; i < request.charFilters().length; i++) {
-                    String charFilterName = request.charFilters()[i];
-                    if (analysisService == null) {
-                        AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilterName);
-                        if (charFilterFactoryFactory == null) {
-                            throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
-                        }
-                        charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilterName);
-                    } else {
-                        charFilterFactories[i] = analysisService.charFilter(charFilterName);
-                        if (charFilterFactories[i] == null) {
-                            throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
-                        }
-                    }
-                    if (charFilterFactories[i] == null) {
-                        throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
-                    }
-                }
-            }
+            charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);
 
             analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
             closeAnalyzer = true;

@@ -407,8 +357,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
             } catch (IOException e) {
                 throw new ElasticsearchException("failed to analyze (charFiltering)", e);
             }
-            if (len > 0)
+            if (len > 0) {
                 sb.append(buf, 0, len);
+            }
         } while (len == BUFFER_SIZE);
         return sb.toString();
     }

@@ -436,7 +387,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
                 lastPosition = lastPosition + increment;
             }
             tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
-                lastOffset +offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
+                lastOffset + offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
 
         }
         stream.end();

@@ -470,27 +421,164 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
     private static Map<String, Object> extractExtendedAttributes(TokenStream stream, final Set<String> includeAttributes) {
         final Map<String, Object> extendedAttributes = new TreeMap<>();
 
-        stream.reflectWith(new AttributeReflector() {
-            @Override
-            public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
-                if (CharTermAttribute.class.isAssignableFrom(attClass))
-                    return;
-                if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
-                    return;
-                if (OffsetAttribute.class.isAssignableFrom(attClass))
-                    return;
-                if (TypeAttribute.class.isAssignableFrom(attClass))
-                    return;
-                if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
-                    if (value instanceof BytesRef) {
-                        final BytesRef p = (BytesRef) value;
-                        value = p.toString();
-                    }
-                    extendedAttributes.put(key, value);
+        stream.reflectWith((attClass, key, value) -> {
+            if (CharTermAttribute.class.isAssignableFrom(attClass)) {
+                return;
+            }
+            if (PositionIncrementAttribute.class.isAssignableFrom(attClass)) {
+                return;
+            }
+            if (OffsetAttribute.class.isAssignableFrom(attClass)) {
+                return;
+            }
+            if (TypeAttribute.class.isAssignableFrom(attClass)) {
+                return;
+            }
+            if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
+                if (value instanceof BytesRef) {
+                    final BytesRef p = (BytesRef) value;
+                    value = p.toString();
                 }
+                extendedAttributes.put(key, value);
             }
         });
 
         return extendedAttributes;
     }
 
+    private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
+                                                              Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
+        if (request.charFilters() != null && request.charFilters().size() > 0) {
+            charFilterFactories = new CharFilterFactory[request.charFilters().size()];
+            for (int i = 0; i < request.charFilters().size(); i++) {
+                final AnalyzeRequest.NameOrDefinition charFilter = request.charFilters().get(i);
+                // parse anonymous settings
+                if (charFilter.definition != null) {
+                    Settings settings = getAnonymousSettings(charFilter.definition);
+                    String charFilterTypeName = settings.get("type");
+                    if (charFilterTypeName == null) {
+                        throw new IllegalArgumentException("Missing [type] setting for anonymous char filter: " + charFilter.definition);
+                    }
+                    AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory =
+                        analysisRegistry.getCharFilterProvider(charFilterTypeName);
+                    if (charFilterFactoryFactory == null) {
+                        throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]");
+                    }
+                    // Need to set anonymous "name" of char_filter
+                    charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
+                } else {
+                    if (analysisService == null) {
+                        AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
+                        if (charFilterFactoryFactory == null) {
+                            throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
+                        }
+                        charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
+                    } else {
+                        charFilterFactories[i] = analysisService.charFilter(charFilter.name);
+                        if (charFilterFactories[i] == null) {
+                            throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
+                        }
+                    }
+                }
+                if (charFilterFactories[i] == null) {
+                    throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
+                }
+            }
+        }
+        return charFilterFactories;
+    }
+
||||
private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
|
||||
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
|
||||
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
|
||||
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
|
||||
for (int i = 0; i < request.tokenFilters().size(); i++) {
|
||||
final AnalyzeRequest.NameOrDefinition tokenFilter = request.tokenFilters().get(i);
|
||||
// parse anonymous settings
|
||||
if (tokenFilter.definition != null) {
|
||||
Settings settings = getAnonymousSettings(tokenFilter.definition);
|
||||
String filterTypeName = settings.get("type");
|
||||
if (filterTypeName == null) {
|
||||
throw new IllegalArgumentException("Missing [type] setting for anonymous token filter: " + tokenFilter.definition);
|
||||
}
|
||||
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory =
|
||||
analysisRegistry.getTokenFilterProvider(filterTypeName);
|
||||
if (tokenFilterFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]");
|
||||
}
|
||||
// Need to set anonymous "name" of tokenfilter
|
||||
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
|
||||
} else {
|
||||
if (analysisService == null) {
|
||||
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);
|
||||
|
||||
if (tokenFilterFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
|
||||
}
|
||||
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
|
||||
} else {
|
||||
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilter.name);
|
||||
if (tokenFilterFactories[i] == null) {
|
||||
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
if (tokenFilterFactories[i] == null) {
|
||||
throw new IllegalArgumentException("failed to find or create token filter under [" + tokenFilter.name + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
return tokenFilterFactories;
|
||||
}
|
||||
|
||||
private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
|
||||
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
|
||||
TokenizerFactory tokenizerFactory;
|
||||
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
|
||||
// parse anonymous settings
|
||||
if (tokenizer.definition != null) {
|
||||
Settings settings = getAnonymousSettings(tokenizer.definition);
|
||||
String tokenizerTypeName = settings.get("type");
|
||||
if (tokenizerTypeName == null) {
|
||||
throw new IllegalArgumentException("Missing [type] setting for anonymous tokenizer: " + tokenizer.definition);
|
||||
}
|
||||
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =
|
||||
analysisRegistry.getTokenizerProvider(tokenizerTypeName);
|
||||
if (tokenizerFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizerTypeName + "]");
|
||||
}
|
||||
// Need to set anonymous "name" of tokenizer
|
||||
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
|
||||
} else {
|
||||
if (analysisService == null) {
|
||||
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
|
||||
if (tokenizerFactoryFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
|
||||
}
|
||||
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
|
||||
} else {
|
||||
tokenizerFactory = analysisService.tokenizer(tokenizer.name);
|
||||
if (tokenizerFactory == null) {
|
||||
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
|
||||
}
|
||||
}
|
||||
}
|
||||
return tokenizerFactory;
|
||||
}
|
||||
|
||||
private static IndexSettings getNaIndexSettings(Settings settings) {
|
||||
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
|
||||
return new IndexSettings(metaData, Settings.EMPTY);
|
||||
}
|
||||
|
||||
private static Settings getAnonymousSettings(Settings providerSetting) {
|
||||
return Settings.builder().put(providerSetting)
|
||||
// for _na_
|
||||
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
|
||||
.build();
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
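The hunk above swaps Lucene's anonymous AttributeReflector for a lambda; AttributeReflector declares a single reflect(...) method, so a lambda is a drop-in replacement. A minimal, self-contained sketch of the same pattern against plain Lucene (assuming lucene-core and lucene-analyzers-common on the classpath; the field name and sample text are illustrative):

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.core.WhitespaceAnalyzer;

    public class ReflectWithDemo {
        public static void main(String[] args) throws Exception {
            try (Analyzer analyzer = new WhitespaceAnalyzer();
                 TokenStream stream = analyzer.tokenStream("field", "quick brown fox")) {
                stream.reset();
                while (stream.incrementToken()) {
                    // AttributeReflector has a single abstract method, so a lambda works here
                    stream.reflectWith((attClass, key, value) ->
                        System.out.println(attClass.getSimpleName() + "#" + key + " = " + value));
                }
                stream.end();
            }
        }
    }
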
@@ -107,7 +107,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
            }

            @Override
            public void onFailure(Throwable t) {
            public void onFailure(Exception t) {
                logger.debug("failed to close indices [{}]", t, (Object)concreteIndices);
                listener.onFailure(t);
            }

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -55,6 +56,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ

    private final Set<ClusterBlock> blocks = new HashSet<>();

    private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;


    public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
        this.originalMessage = originalMessage;

@@ -98,6 +101,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
        return this;
    }

    public CreateIndexClusterStateUpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
        this.waitForActiveShards = waitForActiveShards;
        return this;
    }

    public TransportMessage originalMessage() {
        return originalMessage;
    }

@@ -142,4 +150,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
    public boolean updateAllTypes() {
        return updateAllTypes;
    }

    public ActiveShardCount waitForActiveShards() {
        return waitForActiveShards;
    }
}

@@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -77,6 +78,8 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>

    private boolean updateAllTypes = false;

    private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;

    public CreateIndexRequest() {
    }


@@ -364,7 +367,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
                throw new ElasticsearchParseException("failed to parse source for create index", e);
            }
        } else {
            settings(new String(source.toBytes(), StandardCharsets.UTF_8));
            settings(source.utf8ToString());
        }
        return this;
    }

@@ -440,6 +443,30 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
        return this;
    }

    public ActiveShardCount waitForActiveShards() {
        return waitForActiveShards;
    }

    /**
     * Sets the number of shard copies that should be active for index creation to return.
     * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
     * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
     * wait for all shards (primary and all replicas) to be active before returning.
     * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Index creation will only wait up until the timeout value for the number of shard copies
     * to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
     * determine if the requisite shard copies were all started before returning or timing out.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public CreateIndexRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
        this.waitForActiveShards = waitForActiveShards;
        return this;
    }


    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -462,6 +489,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
            aliases.add(Alias.read(in));
        }
        updateAllTypes = in.readBoolean();
        waitForActiveShards = ActiveShardCount.readFrom(in);
    }

    @Override

@@ -486,5 +514,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
            alias.writeTo(out);
        }
        out.writeBoolean(updateAllTypes);
        waitForActiveShards.writeTo(out);
    }
}

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -249,4 +250,23 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
        request.updateAllTypes(updateAllTypes);
        return this;
    }

    /**
     * Sets the number of shard copies that should be active for index creation to return.
     * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
     * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
     * wait for all shards (primary and all replicas) to be active before returning.
     * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Index creation will only wait up until the timeout value for the number of shard copies
     * to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
     * determine if the requisite shard copies were all started before returning or timing out.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public CreateIndexRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
        request.waitForActiveShards(waitForActiveShards);
        return this;
    }
}

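A hedged usage sketch tying the new builder method to the new response flag (not part of this diff; it assumes an already-connected transport `client`, and the index name and timeout are illustrative):

    CreateIndexResponse response = client.admin().indices()
            .prepareCreate("my-index")
            .setWaitForActiveShards(ActiveShardCount.ALL) // wait for primaries and all replicas
            .setTimeout(TimeValue.timeValueSeconds(30))
            .get();
    if (response.isAcknowledged() && response.isShardsAcked() == false) {
        // the index was created, but not all shard copies started before the timeout
    }
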
@@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;


@@ -30,22 +31,41 @@ import java.io.IOException;
 */
public class CreateIndexResponse extends AcknowledgedResponse {

    private boolean shardsAcked;

    protected CreateIndexResponse() {
    }

    protected CreateIndexResponse(boolean acknowledged) {
    protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked) {
        super(acknowledged);
        assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too
        this.shardsAcked = shardsAcked;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        readAcknowledged(in);
        shardsAcked = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        writeAcknowledged(out);
        out.writeBoolean(shardsAcked);
    }

    /**
     * Returns true if the requisite number of shards were started before
     * returning from the index creation operation. If {@link #isAcknowledged()}
     * is false, then this also returns false.
     */
    public boolean isShardsAcked() {
        return shardsAcked;
    }

    public void addCustomFields(XContentBuilder builder) throws IOException {
        builder.field("shards_acknowledged", isShardsAcked());
    }
}

@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -31,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;


@@ -77,24 +75,12 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
        final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.updateAllTypes())
            .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
            .settings(request.settings()).mappings(request.mappings())
            .aliases(request.aliases()).customs(request.customs());
            .aliases(request.aliases()).customs(request.customs())
            .waitForActiveShards(request.waitForActiveShards());

        createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

            @Override
            public void onResponse(ClusterStateUpdateResponse response) {
                listener.onResponse(new CreateIndexResponse(response.isAcknowledged()));
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof IndexAlreadyExistsException) {
                    logger.trace("[{}] failed to create", t, request.index());
                } else {
                    logger.debug("[{}] failed to create", t, request.index());
                }
                listener.onFailure(t);
            }
        });
        createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
            listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked())),
            listener::onFailure));
    }

}

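The replacement of a two-method anonymous listener with ActionListener.wrap(...) recurs throughout this commit. A minimal sketch of the equivalence (the String payload is illustrative; it assumes org.elasticsearch.action.ActionListener from this codebase):

    ActionListener<String> verbose = new ActionListener<String>() {
        @Override
        public void onResponse(String response) {
            System.out.println("got " + response);
        }

        @Override
        public void onFailure(Exception e) {
            e.printStackTrace();
        }
    };

    // equivalent, using the helper the new code prefers
    ActionListener<String> concise = ActionListener.wrap(
            response -> System.out.println("got " + response),
            Throwable::printStackTrace);
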
@@ -99,7 +99,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
            }

            @Override
            public void onFailure(Throwable t) {
            public void onFailure(Exception t) {
                logger.debug("failed to delete indices [{}]", t, concreteIndices);
                listener.onFailure(t);
            }

@@ -77,7 +77,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
            }

            @Override
            public void onFailure(Throwable e) {
            public void onFailure(Exception e) {
                int index = indexCounter.getAndIncrement();
                indexResponses.set(index, e);
                if (completionCounter.decrementAndGet() == 0) {

@@ -130,7 +130,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc

    private static final ToXContent.Params includeDefaultsParams = new ToXContent.Params() {

        final static String INCLUDE_DEFAULTS = "include_defaults";
        static final String INCLUDE_DEFAULTS = "include_defaults";

        @Override
        public String param(String key) {

@@ -91,7 +91,7 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
            }

            @Override
            public void onFailure(Throwable t) {
            public void onFailure(Exception t) {
                logger.debug("failed to put mappings on indices [{}], type [{}]", t, concreteIndices, request.type());
                listener.onFailure(t);
            }

@@ -92,7 +92,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
            }

            @Override
            public void onFailure(Throwable t) {
            public void onFailure(Exception t) {
                logger.debug("failed to open indices [{}]", t, (Object)concreteIndices);
                listener.onFailure(t);
            }

@@ -30,7 +30,7 @@ import java.io.IOException;
 * when the index is at least {@link #value} old
 */
public class MaxAgeCondition extends Condition<TimeValue> {
    public final static String NAME = "max_age";
    public static final String NAME = "max_age";

    public MaxAgeCondition(TimeValue value) {
        super(NAME);

@@ -29,7 +29,7 @@ import java.io.IOException;
 * when the index has at least {@link #value} docs
 */
public class MaxDocsCondition extends Condition<Long> {
    public final static String NAME = "max_docs";
    public static final String NAME = "max_docs";

    public MaxDocsCondition(Long value) {
        super(NAME);

@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
|
|||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardCount;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.action.support.master.AcknowledgedRequest;
|
||||
import org.elasticsearch.common.ParseField;
|
||||
|
@ -206,4 +207,22 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the number of shard copies that should be active for creation of the
|
||||
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
|
||||
* wait for one shard copy (the primary) to become active. Set this value to
|
||||
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
|
||||
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
|
||||
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
|
||||
* to wait for the desired amount of shard copies to become active before returning.
|
||||
* Index creation will only wait up until the timeout value for the number of shard copies
|
||||
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
|
||||
* determine if the requisite shard copies were all started before returning or timing out.
|
||||
*
|
||||
* @param waitForActiveShards number of active shard copies to wait on
|
||||
*/
|
||||
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
|
||||
this.createIndexRequest.waitForActiveShards(waitForActiveShards);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.rollover;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;

@@ -70,4 +71,23 @@ public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder<Ro
        this.request.getCreateIndexRequest().mapping(type, source);
        return this;
    }

    /**
     * Sets the number of shard copies that should be active for creation of the
     * new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
     * wait for one shard copy (the primary) to become active. Set this value to
     * {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
     * before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Index creation will only wait up until the timeout value for the number of shard copies
     * to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
     * determine if the requisite shard copies were all started before returning or timing out.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public RolloverRequestBuilder waitForActiveShards(ActiveShardCount waitForActiveShards) {
        this.request.setWaitForActiveShards(waitForActiveShards);
        return this;
    }
}

@@ -39,22 +39,28 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
    private static final String DRY_RUN = "dry_run";
    private static final String ROLLED_OVER = "rolled_over";
    private static final String CONDITIONS = "conditions";
    private static final String ACKNOWLEDGED = "acknowledged";
    private static final String SHARDS_ACKED = "shards_acknowledged";

    private String oldIndex;
    private String newIndex;
    private Set<Map.Entry<String, Boolean>> conditionStatus;
    private boolean dryRun;
    private boolean rolledOver;
    private boolean acknowledged;
    private boolean shardsAcked;

    RolloverResponse() {
    }

    RolloverResponse(String oldIndex, String newIndex, Set<Condition.Result> conditionResults,
                     boolean dryRun, boolean rolledOver) {
                     boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcked) {
        this.oldIndex = oldIndex;
        this.newIndex = newIndex;
        this.dryRun = dryRun;
        this.rolledOver = rolledOver;
        this.acknowledged = acknowledged;
        this.shardsAcked = shardsAcked;
        this.conditionStatus = conditionResults.stream()
            .map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched))
            .collect(Collectors.toSet());

@@ -89,12 +95,31 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
    }

    /**
     * Returns if the rollover was not simulated and the conditions were met
     * Returns true if the rollover was not simulated and the conditions were met
     */
    public boolean isRolledOver() {
        return rolledOver;
    }

    /**
     * Returns true if the creation of the new rollover index and switching of the
     * alias to the newly created index was successful, and returns false otherwise.
     * If {@link #isDryRun()} is true, then this will also return false. If this
     * returns false, then {@link #isShardsAcked()} will also return false.
     */
    public boolean isAcknowledged() {
        return acknowledged;
    }

    /**
     * Returns true if the requisite number of shards were started in the newly
     * created rollover index before returning. If {@link #isAcknowledged()} is
     * false, then this will also return false.
     */
    public boolean isShardsAcked() {
        return shardsAcked;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -110,6 +135,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
        conditionStatus = conditions;
        dryRun = in.readBoolean();
        rolledOver = in.readBoolean();
        acknowledged = in.readBoolean();
        shardsAcked = in.readBoolean();
    }

    @Override

@@ -124,6 +151,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
        }
        out.writeBoolean(dryRun);
        out.writeBoolean(rolledOver);
        out.writeBoolean(acknowledged);
        out.writeBoolean(shardsAcked);
    }

    @Override

@@ -132,6 +161,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
        builder.field(NEW_INDEX, newIndex);
        builder.field(ROLLED_OVER, rolledOver);
        builder.field(DRY_RUN, dryRun);
        builder.field(ACKNOWLEDGED, acknowledged);
        builder.field(SHARDS_ACKED, shardsAcked);
        builder.startObject(CONDITIONS);
        for (Map.Entry<String, Boolean> entry : conditionStatus) {
            builder.field(entry.getKey(), entry.getValue());

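A hedged usage sketch of the rollover API with the new flags (not part of this diff; it assumes an already-connected transport `client` and the 5.x builder methods prepareRolloverIndex and addMaxIndexDocsCondition; the alias name and threshold are illustrative):

    RolloverResponse response = client.admin().indices()
            .prepareRolloverIndex("logs-write")              // alias for the current write index
            .addMaxIndexDocsCondition(100_000)
            .waitForActiveShards(ActiveShardCount.from(2))   // primary plus one replica
            .get();
    if (response.isRolledOver() && response.isShardsAcked() == false) {
        // the alias switched to the new index, but its shard copies did not all start in time
    }
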
@@ -25,11 +25,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpda
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.ActiveShardsObserver;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.AliasAction;

@@ -43,7 +44,6 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;


@@ -59,6 +59,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
    private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-(\\d)+$");
    private final MetaDataCreateIndexService createIndexService;
    private final MetaDataIndexAliasesService indexAliasesService;
    private final ActiveShardsObserver activeShardsObserver;
    private final Client client;

    @Inject

@@ -71,6 +72,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
        this.createIndexService = createIndexService;
        this.indexAliasesService = indexAliasesService;
        this.client = client;
        this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
    }

    @Override

@@ -111,48 +113,40 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
                    : generateRolloverIndexName(sourceIndexName);
                if (rolloverRequest.isDryRun()) {
                    listener.onResponse(
                        new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false));
                        new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false));
                    return;
                }
                if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
                    createIndexService.createIndex(prepareCreateIndexRequest(rolloverIndexName, rolloverRequest),
                        new ActionListener<ClusterStateUpdateResponse>() {
                            @Override
                            public void onResponse(ClusterStateUpdateResponse response) {
                                // switch the alias to point to the newly created index
                                indexAliasesService.indicesAliases(
                                    prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
                                        rolloverRequest),
                                    new ActionListener<ClusterStateUpdateResponse>() {
                                        @Override
                                        public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
                                            listener.onResponse(
                                                new RolloverResponse(sourceIndexName, rolloverIndexName,
                                                    conditionResults, false, true));
                                        }

                                        @Override
                                        public void onFailure(Throwable e) {
                                            listener.onFailure(e);
                                        }
                                    });
                            }

                            @Override
                            public void onFailure(Throwable t) {
                                listener.onFailure(t);
                            }
                        });
                    CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(rolloverIndexName, rolloverRequest);
                    createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
                        // switch the alias to point to the newly created index
                        indexAliasesService.indicesAliases(
                            prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
                                rolloverRequest),
                            ActionListener.wrap(aliasClusterStateUpdateResponse -> {
                                if (aliasClusterStateUpdateResponse.isAcknowledged()) {
                                    activeShardsObserver.waitForActiveShards(rolloverIndexName,
                                        rolloverRequest.getCreateIndexRequest().waitForActiveShards(),
                                        rolloverRequest.masterNodeTimeout(),
                                        isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName,
                                            conditionResults, false, true, true, isShardsAcked)),
                                        listener::onFailure);
                                } else {
                                    listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults,
                                        false, true, false, false));
                                }
                            }, listener::onFailure));
                    }, listener::onFailure));
                } else {
                    // conditions not met
                    listener.onResponse(
                        new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false)
                        new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false, false, false)
                    );
                }
            }

            @Override
            public void onFailure(Throwable e) {
            public void onFailure(Exception e) {
                listener.onFailure(e);
            }
        }

@@ -217,6 +211,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
            .masterNodeTimeout(createIndexRequest.masterNodeTimeout())
            .settings(createIndexRequest.settings())
            .aliases(createIndexRequest.aliases())
            .waitForActiveShards(ActiveShardCount.NONE) // not waiting for shards here, will wait on the alias switch operation
            .mappings(createIndexRequest.mappings());
    }

@@ -91,7 +91,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
            }

            @Override
            public void onFailure(Throwable t) {
            public void onFailure(Exception t) {
                logger.debug("failed to update settings on indices [{}]", t, (Object)concreteIndices);
                listener.onFailure(t);
            }

@@ -57,7 +57,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private DiscoveryNode node;
        private long legacyVersion;
        private String allocationId;
        private Throwable storeException;
        private Exception storeException;
        private AllocationStatus allocationStatus;

        /**

@@ -116,7 +116,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
        private StoreStatus() {
        }

        public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
        public StoreStatus(DiscoveryNode node, long legacyVersion, String allocationId, AllocationStatus allocationStatus, Exception storeException) {
            this.node = node;
            this.legacyVersion = legacyVersion;
            this.allocationId = allocationId;

@@ -150,7 +150,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
         * Exception while trying to open the
         * shard index or from when the shard failed
         */
        public Throwable getStoreException() {
        public Exception getStoreException() {
            return storeException;
        }

@@ -177,7 +177,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            allocationId = in.readOptionalString();
            allocationStatus = AllocationStatus.readFrom(in);
            if (in.readBoolean()) {
                storeException = in.readThrowable();
                storeException = in.readException();
            }
        }

@@ -189,7 +189,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
            allocationStatus.writeTo(out);
            if (storeException != null) {
                out.writeBoolean(true);
                out.writeThrowable(storeException);
                out.writeException(storeException);
            } else {
                out.writeBoolean(false);
            }

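A hedged round-trip sketch of the narrowed wire format (not from this diff; it assumes BytesStreamOutput, StreamInput, and ElasticsearchException from this codebase):

    BytesStreamOutput out = new BytesStreamOutput();
    out.writeBoolean(true);                                   // "has exception" flag, as in StoreStatus.writeTo
    out.writeException(new ElasticsearchException("store failure"));

    StreamInput in = out.bytes().streamInput();
    Exception restored = in.readBoolean() ? in.readException() : null;  // typed as Exception, not Throwable
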
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterShardHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -93,12 +94,14 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
            logger.trace("using cluster state version [{}] to determine shards", state.version());
            // collect relevant shard ids of the requested indices for fetching store infos
            for (String index : concreteIndices) {
                IndexMetaData indexMetaData = state.metaData().index(index);
                IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
                if (indexShardRoutingTables == null) {
                    continue;
                }
                for (IndexShardRoutingTable routing : indexShardRoutingTables) {
                    ClusterShardHealth shardHealth = new ClusterShardHealth(routing.shardId().id(), routing);
                    final int shardId = routing.shardId().id();
                    ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, indexMetaData);
                    if (request.shardStatuses().contains(shardHealth.getStatus())) {
                        shardIdsToFetch.add(routing.shardId());
                    }

@@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;

@@ -36,7 +37,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -126,6 +126,24 @@ public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements
        return sourceIndex;
    }

    /**
     * Sets the number of shard copies that should be active for creation of the
     * new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
     * wait for one shard copy (the primary) to become active. Set this value to
     * {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
     * before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Index creation will only wait up until the timeout value for the number of shard copies
     * to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
     * determine if the requisite shard copies were all started before returning or timing out.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
        this.getShrinkIndexRequest().waitForActiveShards(waitForActiveShards);
    }

    public void source(BytesReference source) {
        XContentType xContentType = XContentFactory.xContentType(source);
        if (xContentType != null) {

@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.shrink;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;

@@ -44,4 +45,23 @@ public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder<ShrinkReque
        this.request.getShrinkIndexRequest().settings(settings);
        return this;
    }

    /**
     * Sets the number of shard copies that should be active for creation of the
     * new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
     * wait for one shard copy (the primary) to become active. Set this value to
     * {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
     * before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Index creation will only wait up until the timeout value for the number of shard copies
     * to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
     * determine if the requisite shard copies were all started before returning or timing out.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public ShrinkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
        this.request.setWaitForActiveShards(waitForActiveShards);
        return this;
    }
}

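A hedged usage sketch for the shrink builder (not part of this diff; it assumes an already-connected transport `client` and the 5.x prepareShrinkIndex builder; the index names are illustrative):

    ShrinkResponse response = client.admin().indices()
            .prepareShrinkIndex("logs-full", "logs-shrunk")
            .setWaitForActiveShards(ActiveShardCount.from(1)) // just the primary of each target shard
            .get();
    if (response.isShardsAcked() == false) {
        // the shrunken index was created, but its shards were not all started in time
    }
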
@@ -25,7 +25,7 @@ public final class ShrinkResponse extends CreateIndexResponse {
    ShrinkResponse() {
    }

    ShrinkResponse(boolean acknowledged) {
        super(acknowledged);
    ShrinkResponse(boolean acknowledged, boolean shardsAcked) {
        super(acknowledged, shardsAcked);
    }
}

@@ -29,7 +29,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -40,7 +39,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;


@@ -93,26 +91,12 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
                IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
                return shard == null ? null : shard.getPrimary().getDocs();
            }, indexNameExpressionResolver);
            createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
                @Override
                public void onResponse(ClusterStateUpdateResponse response) {
                    listener.onResponse(new ShrinkResponse(response.isAcknowledged()));
                }

                @Override
                public void onFailure(Throwable t) {
                    if (t instanceof IndexAlreadyExistsException) {
                        logger.trace("[{}] failed to create shrink index", t, updateRequest.index());
                    } else {
                        logger.debug("[{}] failed to create shrink index", t, updateRequest.index());
                    }
                    listener.onFailure(t);
                }
            });
            createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
                listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked())), listener::onFailure));
        }

        @Override
        public void onFailure(Throwable e) {
        public void onFailure(Exception e) {
            listener.onFailure(e);
        }
    });

@@ -162,6 +146,7 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
            .settings(targetIndex.settings())
            .aliases(targetIndex.aliases())
            .customs(targetIndex.customs())
            .waitForActiveShards(targetIndex.waitForActiveShards())
            .shrinkFrom(metaData.getIndex());
    }

@@ -31,8 +31,8 @@ import java.util.EnumSet;
 */
public class CommonStatsFlags implements Streamable, Cloneable {

    public final static CommonStatsFlags ALL = new CommonStatsFlags().all();
    public final static CommonStatsFlags NONE = new CommonStatsFlags().clear();
    public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
    public static final CommonStatsFlags NONE = new CommonStatsFlags().clear();

    private EnumSet<Flag> flags = EnumSet.allOf(Flag.class);
    private String[] types = null;

@@ -72,9 +72,9 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeActio
            }

            @Override
            public void onFailure(Throwable t) {
                logger.debug("failed to delete templates [{}]", t, request.name());
                listener.onFailure(t);
            public void onFailure(Exception e) {
                logger.debug("failed to delete templates [{}]", e, request.name());
                listener.onFailure(e);
            }
        });
    }

@@ -93,9 +93,9 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
            }

            @Override
            public void onFailure(Throwable t) {
                logger.debug("failed to put template [{}]", t, request.name());
                listener.onFailure(t);
            public void onFailure(Exception e) {
                logger.debug("failed to put template [{}]", e, request.name());
                listener.onFailure(e);
            }
        });
    }

@@ -190,13 +190,13 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
                } else {
                    updateSettings(upgradeResponse, listener);
                }
            } catch (Throwable t) {
                listener.onFailure(t);
            } catch (Exception e) {
                listener.onFailure(e);
            }
        }

        @Override
        public void onFailure(Throwable e) {
        public void onFailure(Exception e) {
            listener.onFailure(e);
        }
    };

@@ -212,7 +212,7 @@ public class TransportUpgradeAction extends TransportBroadcastByNodeAction<Upgra
        }

        @Override
        public void onFailure(Throwable e) {
        public void onFailure(Exception e) {
            listener.onFailure(e);
        }
    });

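Across these hunks the commit narrows Throwable handling to Exception, so JVM Errors (for example OutOfMemoryError) propagate instead of being fed to listeners. A minimal, self-contained illustration of the pattern (names are illustrative; only the JDK is assumed):

    import java.util.function.Consumer;

    public class CatchNarrowingDemo {
        public static void main(String[] args) {
            Consumer<Exception> onFailure = e -> System.out.println("failed: " + e.getMessage());
            try {
                throw new IllegalStateException("boom");
            } catch (Exception e) { // was: catch (Throwable t); Errors now propagate
                onFailure.accept(e);
            }
        }
    }
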
Some files were not shown because too many files have changed in this diff.