Merge branch 'master' into feature/rank-eval
commit 2d57fd10b1
@@ -3,7 +3,11 @@ GitHub is reserved for bug reports and feature requests. The best place
to ask a general question is at the Elastic Discourse forums at
https://discuss.elastic.co. If you are in fact posting a bug report or
a feature request, please include one and only one of the below blocks
in your new issue.
in your new issue. Note that whether you're filing a bug report or a
feature request, ensure that your submission is for an
[OS that we support](https://www.elastic.co/support/matrix#show_os).
Bug reports on an OS that we do not support or feature requests
specific to an OS that we do not support will be closed.
-->

<!--
@@ -60,8 +60,8 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/oraclelinux-7-x86_64"
rpm_common config
end
config.vm.define "fedora-22" do |config|
config.vm.box = "elastic/fedora-22-x86_64"
config.vm.define "fedora-24" do |config|
config.vm.box = "elastic/fedora-24-x86_64"
dnf_common config
end
config.vm.define "opensuse-13" do |config|
@@ -173,6 +173,11 @@ subprojects {
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
// for transport client
"org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
"org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
"org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->
@@ -19,7 +19,6 @@
package org.elasticsearch.gradle

import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
@@ -35,6 +34,7 @@ import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
@@ -344,7 +344,7 @@ class BuildPlugin implements Plugin<Project> {

/** Configures generation of maven poms. */
public static void configurePomGeneration(Project project) {
project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded {
project.publishing {
publications {
all { MavenPublication publication -> // we only deal with maven
@@ -184,13 +184,6 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println('---')
current.println("setup:")
body(setup, true)
// always wait for yellow before anything is executed
current.println(
"  - do:\n" +
"      raw:\n" +
"        method: GET\n" +
"        path: \"_cluster/health\"\n" +
"        wait_for_status: \"yellow\"")
}

private void body(Snippet snippet, boolean inSetup) {
@@ -18,14 +18,23 @@
*/
package org.elasticsearch.gradle.plugin

import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip

import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern

/**
* Encapsulates build configuration for an Elasticsearch plugin.
*/
@@ -38,19 +47,35 @@ public class PluginBuildPlugin extends BuildPlugin {
// this afterEvaluate must happen before the afterEvaluate added by integTest creation,
// so that the file name resolution for installing the plugin will be setup
project.afterEvaluate {
boolean isModule = project.path.startsWith(':modules:')
String name = project.pluginProperties.extension.name
project.jar.baseName = name
project.bundlePlugin.baseName = name

if (project.pluginProperties.extension.hasClientJar) {
// for plugins which work with the transport client, we copy the jar
// file to a new name, copy the nebula generated pom to the same name,
// and generate a different pom for the zip
project.signArchives.enabled = false
addJarPomGeneration(project)
addClientJarTask(project)
if (isModule == false) {
addZipPomGeneration(project)
}
} else {
// no client plugin, so use the pom file from nebula, without jar, for the zip
project.ext.set("nebulaPublish.maven.jar", false)
}

project.integTest.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
if (project.path.startsWith(':modules:')) {
if (isModule) {
project.integTest.clusterConfig.module(project)
project.tasks.run.clusterConfig.module(project)
} else {
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
addPomGeneration(project)
project.integTest.clusterConfig.plugin(project.path)
project.tasks.run.clusterConfig.plugin(project.path)
addZipPomGeneration(project)
}

project.namingConventions {
@@ -60,6 +85,7 @@ public class PluginBuildPlugin extends BuildPlugin {
}
createIntegTestTask(project)
createBundleTask(project)
project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime'))
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
}
@@ -118,40 +144,93 @@ public class PluginBuildPlugin extends BuildPlugin {
}
project.assemble.dependsOn(bundle)

// remove jar from the archives (things that will be published), and set it to the zip
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.artifacts.add('archives', bundle)

// also make the zip the default artifact (used when depending on this project)
project.configurations.getByName('default').extendsFrom = []
project.artifacts.add('default', bundle)
// also make the zip available as a configuration (used when depending on this project)
project.configurations.create('zip')
project.artifacts.add('zip', bundle)
}

/**
* Adds the plugin jar and zip as publications.
*/
protected static void addPomGeneration(Project project) {
project.plugins.apply(MavenBasePublishPlugin.class)
project.plugins.apply(MavenScmPlugin.class)
/** Adds a task to move jar and associated files to a "-client" name. */
protected static void addClientJarTask(Project project) {
Task clientJar = project.tasks.create('clientJar')
clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
clientJar.doFirst {
Path jarFile = project.jar.outputs.files.singleFile.toPath()
String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)

String pomFileName = jarFile.fileName.toString().replace('.jar', '.pom')
String clientPomFileName = clientFileName.replace('.jar', '.pom')
Files.copy(jarFile.resolveSibling(pomFileName), jarFile.resolveSibling(clientPomFileName),
StandardCopyOption.REPLACE_EXISTING)

String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
StandardCopyOption.REPLACE_EXISTING)

String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
project.assemble.dependsOn(clientJar)
}

static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)

/** Finds the repository URL to put in the pom, based on the git origin. */
protected static String urlFromOrigin(String origin) {
if (origin.startsWith('https')) {
return origin
}
Matcher matcher = GIT_PATTERN.matcher(origin)
if (matcher.matches()) {
return "https://${matcher.group(1)}/${matcher.group(2)}"
} else {
return origin // best effort, the url doesn't really matter, it is just required by maven central
}
}

/** Adds nebula publishing task to generate a pom file for the plugin. */
protected static void addJarPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)

project.publishing {
publications {
nebula {
artifact project.bundlePlugin
pom.withXml {
// overwrite the name/description in the pom nebula set up
Node root = asNode()
for (Node node : root.children()) {
if (node.name() == 'name') {
node.setValue(project.pluginProperties.extension.name)
} else if (node.name() == 'description') {
node.setValue(project.pluginProperties.extension.description)
}
}
jar(MavenPublication) {
from project.components.java
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}

/** Adds a task to generate a pom file for the zip publication. */
protected void addZipPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)

project.publishing {
publications {
zip(MavenPublication) {
artifact project.bundlePlugin
pom.packaging = 'pom'
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}
}
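Note: the urlFromOrigin helper above turns a git-protocol origin into a browsable https URL for the generated pom. A minimal, self-contained Java sketch of the same conversion (the class and method names here are illustrative, not part of the build):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class OriginUrlSketch {
    // same pattern as GIT_PATTERN above
    private static final Pattern GIT_PATTERN = Pattern.compile("git@([^:]+):([^.]+)\\.git");

    static String urlFromOrigin(String origin) {
        if (origin.startsWith("https")) {
            return origin; // already browsable
        }
        Matcher matcher = GIT_PATTERN.matcher(origin);
        if (matcher.matches()) {
            // git@github.com:elastic/elasticsearch.git -> https://github.com/elastic/elasticsearch
            return "https://" + matcher.group(1) + "/" + matcher.group(2);
        }
        return origin; // best effort, maven central only requires that some url is present
    }

    public static void main(String[] args) {
        System.out.println(urlFromOrigin("git@github.com:elastic/elasticsearch.git"));
    }
}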
@@ -39,6 +39,10 @@ class PluginPropertiesExtension {
@Input
String classname

/** Indicates whether the plugin jar should be made available for the transport client. */
@Input
boolean hasClientJar = false

PluginPropertiesExtension(Project project) {
name = project.name
version = project.version
@@ -20,12 +20,15 @@ package org.elasticsearch.gradle.test

import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input

/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {

private final Project project

@Input
String distribution = 'integ-test-zip'

@@ -77,6 +80,10 @@ class ClusterConfiguration {
return tmpFile.exists()
}

public ClusterConfiguration(Project project) {
this.project = project
}

Map<String, String> systemProperties = new HashMap<>()

Map<String, String> settings = new HashMap<>()
@@ -84,7 +91,7 @@ class ClusterConfiguration {
// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()

LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()

List<Project> modules = new ArrayList<>()

@@ -101,13 +108,9 @@ class ClusterConfiguration {
}

@Input
void plugin(String name, FileCollection file) {
plugins.put(name, file)
}

@Input
void plugin(String name, Project pluginProject) {
plugins.put(name, pluginProject)
void plugin(String path) {
Project pluginProject = project.project(path)
plugins.put(pluginProject.name, pluginProject)
}

/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */
@@ -167,7 +167,7 @@ class ClusterFormationTasks {
}

// install plugins
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
}
@@ -326,38 +326,34 @@ class ClusterFormationTasks {
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)

List<FileCollection> pluginFiles = []
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
FileCollection pluginZip
if (plugin.getValue() instanceof Project) {
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot use project ${pluginProject.path} which is not an esplugin")
}
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, pluginProject)
setup.dependsOn(pluginProject.tasks.bundlePlugin)
pluginZip = configuration
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {

// also allow rest tests to use the rest spec from the plugin
Copy copyRestSpec = null
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
} else {
pluginZip = plugin.getValue()
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot use project ${pluginProject.path} which is not an esplugin")
}
pluginFiles.add(pluginZip)
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
setup.dependsOn(pluginProject.tasks.bundlePlugin)

// also allow rest tests to use the rest spec from the plugin
String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
pluginFiles.add(configuration)
}

copyPlugins.into(node.pluginsTmpDir)
@@ -379,15 +375,10 @@ class ClusterFormationTasks {
return installModule
}

static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
FileCollection pluginZip
if (plugin instanceof Project) {
pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
} else {
pluginZip = plugin
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
FileCollection pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
return configureExecTask(name, project, setup, node, args)
}
@@ -32,7 +32,7 @@ import org.gradle.util.ConfigureUtil
*/
public class RestIntegTestTask extends RandomizedTestingTask {

ClusterConfiguration clusterConfig = new ClusterConfiguration()
ClusterConfiguration clusterConfig

/** Flag indicating whether the rest tests in the rest spec should be run. */
@Input
@@ -44,6 +44,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
dependsOn(project.testClasses)
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = new ClusterConfiguration(project)

// start with the common test configuration
configure(BuildPlugin.commonTestConfig(project))

@@ -7,11 +7,15 @@ import org.gradle.util.ConfigureUtil

public class RunTask extends DefaultTask {

ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
ClusterConfiguration clusterConfig

public RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
clusterConfig = new ClusterConfiguration(project)
clusterConfig.httpPort = 9200
clusterConfig.transportPort = 9300
clusterConfig.daemonize = false
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}
@@ -233,7 +233,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]bootstrap[/\\]Security.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]ElasticsearchClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]FilterClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]node[/\\]NodeClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]support[/\\]AbstractClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClient.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />
@@ -556,7 +555,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
@@ -576,10 +574,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]InternalRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]date[/\\]InternalDateRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]ipv4[/\\]InternalIPv4Range.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedBytesHashSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedMapSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedNumericSamplerAggregator.java" checks="LineLength" />
@@ -587,37 +582,27 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]InternalSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantLongTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantStringTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]UnmappedSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]GND.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]NXYSignificanceHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]PercentageScore.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]ScriptHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]SignificanceHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]AbstractTermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]DoubleTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]GlobalOrdinalsStringTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalOrder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]InternalTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]LongTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]StringTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]TermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]UnmappedTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]terms[/\\]support[/\\]IncludeExclude.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]ValuesSourceMetricsAggregationBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]CardinalityAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]GeoBoundsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]InternalGeoBounds.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]AbstractTDigestPercentilesAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentileRanksAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentilesAggregator.java" checks="LineLength" />
@@ -625,7 +610,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]stats[/\\]extended[/\\]ExtendedStatsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]tophits[/\\]TopHitsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]bucketscript[/\\]BucketScriptPipelineAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]derivative[/\\]InternalDerivative.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
@@ -756,7 +740,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]ShardsAllocatorModuleIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]SimpleAllocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterIndexHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterStateHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]AutoExpandReplicasTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]DateMathExpressionResolverTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]HumanReadableIndexSettingsTests.java" checks="LineLength" />
@@ -862,7 +845,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]netty[/\\]NettyHttpServerPipeliningTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModuleTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexWithShadowReplicasIT.java" checks="LineLength" />
@@ -1053,7 +1035,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificanceHeuristicTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AvgIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]SumIT.java" checks="LineLength" />
@@ -1125,16 +1106,10 @@
<suppress files="modules[/\\]lang-expression[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]expression[/\\]MoreExpressionTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyPlugin.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptEngineService.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BucketScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]BulkTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]DoubleTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]EquivalenceTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentileRanksTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HDRPercentilesTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]HistogramTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IPv4RangeTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndexLookupTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]IndicesRequestTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]LongTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinDocCountTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]MinTests.java" checks="LineLength" />
@@ -1143,8 +1118,6 @@
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SearchFieldsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]SimpleSortTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]StringTermsTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentileRanksTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]TDigestPercentilesTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]messy[/\\]tests[/\\]package-info.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovyScriptTests.java" checks="LineLength" />
<suppress files="modules[/\\]lang-groovy[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]groovy[/\\]GroovySecurityTests.java" checks="LineLength" />
@@ -26,6 +26,7 @@ import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.entity.ContentType;
@@ -55,7 +56,7 @@ final class RequestLogger {
*/
static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() +
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) +
"] returned [" + httpResponse.getStatusLine() + "]");
}
if (tracer.isTraceEnabled()) {
@@ -81,8 +82,10 @@ final class RequestLogger {
* Logs a request that failed
*/
static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) {
logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] failed", e);
if (logger.isTraceEnabled()) {
if (logger.isDebugEnabled()) {
logger.debug("request [" + request.getMethod() + " " + host + getUri(request.getRequestLine()) + "] failed", e);
}
if (tracer.isTraceEnabled()) {
String traceRequest;
try {
traceRequest = buildTraceRequest(request, host);
@@ -98,7 +101,7 @@ final class RequestLogger {
* Creates curl output for given request
*/
static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException {
String requestLine = "curl -iX " + request.getMethod() + " '" + host + request.getRequestLine().getUri() + "'";
String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'";
if (request instanceof HttpEntityEnclosingRequest) {
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request;
if (enclosingRequest.getEntity() != null) {
@@ -143,4 +146,11 @@ final class RequestLogger {
}
return responseLine;
}

private static String getUri(RequestLine requestLine) {
if (requestLine.getUri().charAt(0) != '/') {
return "/" + requestLine.getUri();
}
return requestLine.getUri();
}
}
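Note: the private getUri helper added above keeps logged request lines and curl traces well-formed when a URI was created without a leading slash (the case the tests later in this diff exercise). A minimal Java sketch of that normalization (the class name is illustrative):

public class UriNormalizationSketch {
    // mirrors the check in RequestLogger's getUri
    static String normalize(String uri) {
        return uri.charAt(0) != '/' ? "/" + uri : uri;
    }

    public static void main(String[] args) {
        System.out.println(normalize("index/type/_api"));  // -> /index/type/_api
        System.out.println(normalize("/index/type/_api")); // unchanged
    }
}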
@@ -37,8 +37,6 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
@@ -91,7 +89,7 @@ public final class RestClient implements Closeable {
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;

private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
HttpHost[] hosts, FailureListener failureListener) {
this.client = client;
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
@@ -117,6 +115,39 @@
this.blacklist.clear();
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without parameters and request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
return performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Shortcut to {@link #performRequest(String, String, Map, HttpEntity, Header...)} but without request body.
*
* @param method the http method
* @param endpoint the path of the request (without host and port)
* @param params the query_string parameters
* @param headers the optional request headers
* @return the response returned by elasticsearch
* @throws IOException in case of a problem or the connection was aborted
* @throws ClientProtocolException in case of an http protocol error
* @throws ResponseException in case elasticsearch responded with a status code that indicated an error
*/
public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
return performRequest(method, endpoint, params, null, headers);
}

/**
* Sends a request to the elasticsearch cluster that the current client points to.
* Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain
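Note: both shortcut overloads above delegate to the full performRequest(String, String, Map, HttpEntity, Header...) variant. A minimal usage sketch, assuming a node reachable on localhost:9200 (the responses are left unused here; RestClient implements Closeable, so try-with-resources releases the underlying http client):

import java.io.IOException;
import java.util.Collections;

import org.apache.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class PerformRequestSketch {
    public static void main(String[] args) throws IOException {
        try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
            // no query parameters and no request body
            Response root = restClient.performRequest("GET", "/");
            // query parameters but no request body
            Response count = restClient.performRequest("GET", "/_count",
                    Collections.singletonMap("pretty", "true"));
        }
    }
}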
@@ -360,10 +391,11 @@
private static final Header[] EMPTY_HEADERS = new Header[0];

private final HttpHost[] hosts;
private CloseableHttpClient httpClient;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;

/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
@@ -375,17 +407,6 @@
this.hosts = hosts;
}

/**
* Sets the http client. A new default one will be created if not
* specified, by calling {@link #createDefaultHttpClient(Registry)})}.
*
* @see CloseableHttpClient
*/
public Builder setHttpClient(CloseableHttpClient httpClient) {
this.httpClient = httpClient;
return this;
}

/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
@@ -401,12 +422,10 @@
}

/**
* Sets the default request headers, to be used when creating the default http client instance.
* In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be
* set to it externally during http client construction.
* Sets the default request headers, to be sent with every request unless overridden on a per request basis
*/
public Builder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "default headers must not be null");
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
}
@@ -418,48 +437,94 @@
* Sets the {@link FailureListener} to be notified for each request failure
*/
public Builder setFailureListener(FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failure listener must not be null");
Objects.requireNonNull(failureListener, "failureListener must not be null");
this.failureListener = failureListener;
return this;
}

/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
*/
public Builder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
this.httpClientConfigCallback = httpClientConfigCallback;
return this;
}

/**
* Sets the {@link RequestConfigCallback} to be used to customize http client configuration
*/
public Builder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
this.requestConfigCallback = requestConfigCallback;
return this;
}

/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (httpClient == null) {
httpClient = createDefaultHttpClient(null);
}
if (failureListener == null) {
failureListener = new FailureListener();
}
CloseableHttpClient httpClient = createHttpClient();
return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
}

/**
* Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
*
* @see CloseableHttpClient
*/
public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
PoolingHttpClientConnectionManager connectionManager;
if (socketFactoryRegistry == null) {
connectionManager = new PoolingHttpClientConnectionManager();
} else {
connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
private CloseableHttpClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);

if (requestConfigCallback != null) {
requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
RequestConfig requestConfig = requestConfigBuilder.build();

PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);

//default timeouts are all infinite
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
HttpClientBuilder httpClientBuilder = HttpClientBuilder.create().setConnectionManager(connectionManager)
.setDefaultRequestConfig(requestConfig);

if (httpClientConfigCallback != null) {
httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
return httpClientBuilder.build();
}
}

/**
* Callback used to customize the default {@link RequestConfig} being set to the {@link CloseableHttpClient}
* @see HttpClientBuilder#setDefaultRequestConfig
*/
public interface RequestConfigCallback {
/**
* Allows to customize the {@link RequestConfig} that will be used with each request.
* It is common to customize the different timeout values through this method without losing any other useful default
* value that the {@link RestClient.Builder} internally sets.
*/
void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder);
}

/**
* Callback used to customize the {@link CloseableHttpClient} instance used by a {@link RestClient} instance.
* Allows to customize default {@link RequestConfig} being set to the client and any parameter that
* can be set through {@link HttpClientBuilder}
*/
public interface HttpClientConfigCallback {
/**
* Allows to customize the {@link CloseableHttpClient} being created and used by the {@link RestClient}.
* It is common to customize the default {@link org.apache.http.client.CredentialsProvider} through this method,
* without losing any other useful default value that the {@link RestClient.Builder} internally sets.
* Also useful to set up ssl through {@link SSLSocketFactoryHttpConfigCallback}.
*/
void customizeHttpClient(HttpClientBuilder httpClientBuilder);
}

/**
* Listener that allows to be notified whenever a failure happens. Useful when sniffing is enabled, so that we can sniff on failure.
* The default implementation is a no-op.
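Note: the two callbacks above let callers adjust a single setting without losing the defaults the builder installs. A minimal sketch wiring both (the credentials and timeout value are placeholders, not from the source):

import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.client.HttpClientBuilder;
import org.elasticsearch.client.RestClient;

public class BuilderCallbacksSketch {
    public static void main(String[] args) {
        final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
        credentialsProvider.setCredentials(AuthScope.ANY,
                new UsernamePasswordCredentials("user", "password")); // placeholder credentials

        RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
                .setRequestConfigCallback(new RestClient.RequestConfigCallback() {
                    @Override
                    public void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                        // change one timeout, keeping the other defaults the builder sets
                        requestConfigBuilder.setSocketTimeout(60000);
                    }
                })
                .setHttpClientConfigCallback(new RestClient.HttpClientConfigCallback() {
                    @Override
                    public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
                        // the use case called out in the javadoc above
                        httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
                    }
                })
                .build();
    }
}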
@ -0,0 +1,53 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.client;
|
||||
|
||||
import org.apache.http.config.Registry;
|
||||
import org.apache.http.config.RegistryBuilder;
|
||||
import org.apache.http.conn.socket.ConnectionSocketFactory;
|
||||
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
|
||||
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
|
||||
|
||||
/**
|
||||
* Helps configuring the http client when needing to communicate over ssl. It effectively replaces the connection manager
|
||||
* with one that has ssl properly configured thanks to the provided {@link SSLConnectionSocketFactory}.
|
||||
*/
|
||||
public class SSLSocketFactoryHttpConfigCallback implements RestClient.HttpClientConfigCallback {
|
||||
|
||||
private final SSLConnectionSocketFactory sslSocketFactory;
|
||||
|
||||
public SSLSocketFactoryHttpConfigCallback(SSLConnectionSocketFactory sslSocketFactory) {
|
||||
this.sslSocketFactory = sslSocketFactory;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
|
||||
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
|
||||
.register("http", PlainConnectionSocketFactory.getSocketFactory())
|
||||
.register("https", sslSocketFactory).build();
|
||||
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
|
||||
//default settings may be too constraining
|
||||
connectionManager.setDefaultMaxPerRoute(10);
|
||||
connectionManager.setMaxTotal(30);
|
||||
httpClientBuilder.setConnectionManager(connectionManager);
|
||||
}
|
||||
}
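A sketch of how the class above might be wired up in practice; it assumes a properly initialized SSLContext (obtaining one is environment specific, and SSLContext.getDefault() is only a stand-in that throws NoSuchAlgorithmException):

    SSLContext sslContext = SSLContext.getDefault(); // placeholder; real code would build a context from a keystore
    SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory(sslContext);
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "https"))
            .setHttpClientConfigCallback(new SSLSocketFactoryHttpConfigCallback(sslSocketFactory))
            .build();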

@@ -50,7 +50,14 @@ public class RequestLoggerTests extends RestClientTestCase {

    public void testTraceRequest() throws IOException, URISyntaxException {
        HttpHost host = new HttpHost("localhost", 9200, getRandom().nextBoolean() ? "http" : "https");
        URI uri = new URI("/index/type/_api");

        String expectedEndpoint = "/index/type/_api";
        URI uri;
        if (randomBoolean()) {
            uri = new URI(expectedEndpoint);
        } else {
            uri = new URI("index/type/_api");
        }

        HttpRequestBase request;
        int requestType = RandomInts.randomIntBetween(getRandom(), 0, 7);

@@ -83,7 +90,7 @@ public class RequestLoggerTests extends RestClientTestCase {
                throw new UnsupportedOperationException();
        }

        String expected = "curl -iX " + request.getMethod() + " '" + host + uri + "'";
        String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'";
        boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
        String requestBody = "{ \"field\": \"value\" }";
        if (hasBody) {

@@ -22,6 +22,7 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicHeader;

@@ -67,7 +68,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
            RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
            fail("should have failed");
        } catch(NullPointerException e) {
            assertEquals("default headers must not be null", e.getMessage());
            assertEquals("defaultHeaders must not be null", e.getMessage());
        }

        try {

@@ -81,7 +82,21 @@ public class RestClientBuilderTests extends RestClientTestCase {
            RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
            fail("should have failed");
        } catch(NullPointerException e) {
            assertEquals("failure listener must not be null", e.getMessage());
            assertEquals("failureListener must not be null", e.getMessage());
        }

        try {
            RestClient.builder(new HttpHost("localhost", 9200)).setHttpClientConfigCallback(null);
            fail("should have failed");
        } catch(NullPointerException e) {
            assertEquals("httpClientConfigCallback must not be null", e.getMessage());
        }

        try {
            RestClient.builder(new HttpHost("localhost", 9200)).setRequestConfigCallback(null);
            fail("should have failed");
        } catch(NullPointerException e) {
            assertEquals("requestConfigCallback must not be null", e.getMessage());
        }

        int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);

@@ -91,7 +106,18 @@ public class RestClientBuilderTests extends RestClientTestCase {
        }
        RestClient.Builder builder = RestClient.builder(hosts);
        if (getRandom().nextBoolean()) {
            builder.setHttpClient(HttpClientBuilder.create().build());
            builder.setHttpClientConfigCallback(new RestClient.HttpClientConfigCallback() {
                @Override
                public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
                }
            });
        }
        if (getRandom().nextBoolean()) {
            builder.setRequestConfigCallback(new RestClient.RequestConfigCallback() {
                @Override
                public void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                }
            });
        }
        if (getRandom().nextBoolean()) {
            int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);

@@ -19,8 +19,8 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.ProtocolVersion;

@@ -92,7 +92,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            httpHosts[i] = new HttpHost("localhost", 9200 + i);
        }
        failureListener = new TrackingFailureListener();
        restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build();
        restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
    }

    public void testRoundRobinOkStatusCodes() throws Exception {

@@ -102,8 +102,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            Collections.addAll(hostsSet, httpHosts);
            for (int j = 0; j < httpHosts.length; j++) {
                int statusCode = randomOkStatusCode(getRandom());
                try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
                        Collections.<String, String>emptyMap(), null)) {
                try (Response response = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
                    assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode));
                    assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost()));
                }

@@ -121,8 +120,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            for (int j = 0; j < httpHosts.length; j++) {
                String method = randomHttpMethod(getRandom());
                int statusCode = randomErrorNoRetryStatusCode(getRandom());
                try (Response response = restClient.performRequest(method, "/" + statusCode,
                        Collections.<String, String>emptyMap(), null)) {
                try (Response response = restClient.performRequest(method, "/" + statusCode)) {
                    if (method.equals("HEAD") && statusCode == 404) {
                        //no exception gets thrown although we got a 404
                        assertThat(response.getStatusLine().getStatusCode(), equalTo(404));

@@ -149,7 +147,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
    public void testRoundRobinRetryErrors() throws Exception {
        String retryEndpoint = randomErrorRetryEndpoint();
        try {
            restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
            restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
            fail("request should have failed");
        } catch(ResponseException e) {
            Set<HttpHost> hostsSet = new HashSet<>();

@@ -199,7 +197,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            for (int j = 0; j < httpHosts.length; j++) {
                retryEndpoint = randomErrorRetryEndpoint();
                try {
                    restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint, Collections.<String, String>emptyMap(), null);
                    restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
                    fail("request should have failed");
                } catch(ResponseException e) {
                    Response response = e.getResponse();

@@ -225,8 +223,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
        for (int y = 0; y < iters; y++) {
            int statusCode = randomErrorNoRetryStatusCode(getRandom());
            Response response;
            try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode,
                    Collections.<String, String>emptyMap(), null)) {
            try (Response esResponse = restClient.performRequest(randomHttpMethod(getRandom()), "/" + statusCode)) {
                response = esResponse;
            }
            catch(ResponseException e) {

@@ -245,8 +242,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
            for (int y = 0; y < i + 1; y++) {
                retryEndpoint = randomErrorRetryEndpoint();
                try {
                    restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint,
                            Collections.<String, String>emptyMap(), null);
                    restClient.performRequest(randomHttpMethod(getRandom()), retryEndpoint);
                    fail("request should have failed");
                } catch(ResponseException e) {
                    Response response = e.getResponse();

@@ -19,7 +19,6 @@

package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;

@@ -129,8 +128,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
        httpHost = new HttpHost("localhost", 9200);
        failureListener = new TrackingFailureListener();
        restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders)
                .setFailureListener(failureListener).build();
        restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
    }

    /**

@@ -156,7 +154,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
    }

    public void testSetNodes() throws IOException {
    public void testSetHosts() throws IOException {
        try {
            restClient.setHosts((HttpHost[]) null);
            fail("setHosts should have failed");

@@ -189,8 +187,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
    public void testOkStatusCodes() throws Exception {
        for (String method : getHttpMethods()) {
            for (int okStatusCode : getOkStatusCodes()) {
                Response response = restClient.performRequest(method, "/" + okStatusCode,
                        Collections.<String, String>emptyMap(), null);
                Response response = performRequest(method, "/" + okStatusCode);
                assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode));
            }
        }

@@ -204,8 +201,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        for (String method : getHttpMethods()) {
            //error status codes should cause an exception to be thrown
            for (int errorStatusCode : getAllErrorStatusCodes()) {
                try (Response response = restClient.performRequest(method, "/" + errorStatusCode,
                        Collections.<String, String>emptyMap(), null)) {
                try (Response response = performRequest(method, "/" + errorStatusCode)) {
                    if (method.equals("HEAD") && errorStatusCode == 404) {
                        //no exception gets thrown although we got a 404
                        assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode));

@@ -231,14 +227,14 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        for (String method : getHttpMethods()) {
            //IOExceptions should be let bubble up
            try {
                restClient.performRequest(method, "/coe", Collections.<String, String>emptyMap(), null);
                performRequest(method, "/coe");
                fail("request should have failed");
            } catch(IOException e) {
                assertThat(e, instanceOf(ConnectTimeoutException.class));
            }
            failureListener.assertCalled(httpHost);
            try {
                restClient.performRequest(method, "/soe", Collections.<String, String>emptyMap(), null);
                performRequest(method, "/soe");
                fail("request should have failed");
            } catch(IOException e) {
                assertThat(e, instanceOf(SocketTimeoutException.class));

@@ -275,8 +271,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }
        for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) {
            try {
                restClient.performRequest(method, "/" + randomStatusCode(getRandom()),
                        Collections.<String, String>emptyMap(), entity);
                restClient.performRequest(method, "/" + randomStatusCode(getRandom()), Collections.<String, String>emptyMap(), entity);
                fail("request should have failed");
            } catch(UnsupportedOperationException e) {
                assertThat(e.getMessage(), equalTo(method + " with body is not supported"));

@@ -288,13 +283,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
            restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header[])null);
            performRequest(method, "/" + statusCode, (Header[])null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("request headers must not be null", e.getMessage());
        }
        try {
            restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), null, (Header)null);
            performRequest(method, "/" + statusCode, (Header)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("request header must not be null", e.getMessage());

@@ -305,7 +300,13 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        String method = randomHttpMethod(getRandom());
        int statusCode = randomStatusCode(getRandom());
        try {
            restClient.performRequest(method, "/" + statusCode, null, null);
            restClient.performRequest(method, "/" + statusCode, (Map<String, String>)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("params must not be null", e.getMessage());
        }
        try {
            restClient.performRequest(method, "/" + statusCode, null, (HttpEntity)null);
            fail("request should have failed");
        } catch(NullPointerException e) {
            assertEquals("params must not be null", e.getMessage());

@@ -352,7 +353,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        String uriAsString = "/" + randomStatusCode(getRandom());
        URIBuilder uriBuilder = new URIBuilder(uriAsString);
        Map<String, String> params = Collections.emptyMap();
        if (getRandom().nextBoolean()) {
        boolean hasParams = randomBoolean();
        if (hasParams) {
            int numParams = RandomInts.randomIntBetween(getRandom(), 1, 3);
            params = new HashMap<>(numParams);
            for (int i = 0; i < numParams; i++) {

@@ -395,7 +397,8 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }

        HttpEntity entity = null;
        if (request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) {
        boolean hasBody = request instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean();
        if (hasBody) {
            entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(getRandom(), 10, 100));
            ((HttpEntityEnclosingRequest) request).setEntity(entity);
        }

@@ -418,10 +421,29 @@ public class RestClientSingleHostTests extends RestClientTestCase {
        }

        try {
            restClient.performRequest(method, uriAsString, params, entity, headers);
            if (hasParams == false && hasBody == false && randomBoolean()) {
                restClient.performRequest(method, uriAsString, headers);
            } else if (hasBody == false && randomBoolean()) {
                restClient.performRequest(method, uriAsString, params, headers);
            } else {
                restClient.performRequest(method, uriAsString, params, entity, headers);
            }
        } catch(ResponseException e) {
            //all good
        }
        return request;
    }

    private Response performRequest(String method, String endpoint, Header... headers) throws IOException {
        switch(randomIntBetween(0, 2)) {
            case 0:
                return restClient.performRequest(method, endpoint, headers);
            case 1:
                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), headers);
            case 2:
                return restClient.performRequest(method, endpoint, Collections.<String, String>emptyMap(), null, headers);
            default:
                throw new UnsupportedOperationException();
        }
    }
}
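The helper above cycles through the new convenience overloads; outside of tests, the shortest form can be quite compact. A sketch, with host and endpoint as placeholders and the headers varargs simply omitted:

    try (RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
        // params and body are no longer required arguments for the simple case
        Response response = restClient.performRequest("GET", "/_cluster/health");
        System.out.println(response.getStatusLine());
    }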

@@ -62,7 +62,7 @@ public class HostsSniffer {
     * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts
     */
    public List<HttpHost> sniffHosts() throws IOException {
        try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) {
        try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams)) {
            return readHosts(response.getEntity());
        }
    }

@@ -156,7 +156,7 @@ public class HostsSniffer {

        private final RestClient restClient;
        private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT;
        private Scheme scheme;
        private Scheme scheme = Scheme.HTTP;

        private Builder(RestClient restClient) {
            Objects.requireNonNull(restClient, "restClient cannot be null");
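With the scheme now defaulting to HTTP, building a sniffer through the builder shown above might look like this sketch (the timeout value is an arbitrary placeholder):

    HostsSniffer sniffer = HostsSniffer.builder(restClient)
            .setSniffRequestTimeoutMillis(2000)        // placeholder; DEFAULT_SNIFF_REQUEST_TIMEOUT applies if omitted
            .setScheme(HostsSniffer.Scheme.HTTPS)      // optional; the builder now defaults to Scheme.HTTP
            .build();
    List<HttpHost> sniffedHosts = sniffer.sniffHosts();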
@@ -89,7 +89,11 @@ public class HostsSnifferTests extends RestClientTestCase {
    public void testSniffNodes() throws IOException, URISyntaxException {
        HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort());
        try (RestClient restClient = RestClient.builder(httpHost).build()) {
            HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme);
            HostsSniffer.Builder builder = HostsSniffer.builder(restClient).setSniffRequestTimeoutMillis(sniffRequestTimeout);
            if (scheme != HostsSniffer.Scheme.HTTP || randomBoolean()) {
                builder.setScheme(scheme);
            }
            HostsSniffer sniffer = builder.build();
            try {
                List<HttpHost> sniffedHosts = sniffer.sniffHosts();
                if (sniffResponse.isFailure) {

@@ -0,0 +1,53 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import org.elasticsearch.gradle.precommit.PrecommitTasks

apply plugin: 'elasticsearch.build'

group = 'org.elasticsearch.client'

dependencies {
  compile "org.elasticsearch:elasticsearch:${version}"
  compile project(path: ':modules:transport-netty3', configuration: 'runtime')
  compile project(path: ':modules:reindex', configuration: 'runtime')
  compile project(path: ':modules:lang-mustache', configuration: 'runtime')
  compile project(path: ':modules:percolator', configuration: 'runtime')
  testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
  testCompile "junit:junit:${versions.junit}"
}

dependencyLicenses {
  dependencies = project.configurations.runtime.fileCollection {
    it.group.startsWith('org.elasticsearch') == false
  }
}

forbiddenApisTest {
  // we don't use the core test-framework, no lucene classes present so we don't want the es-test-signatures to
  // be pulled in
  signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
                    PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
}

namingConventions {
  testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest'
  //we don't have integration tests
  skipIntegTestInDisguise = true
}
@@ -0,0 +1,63 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport.client;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;

import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;


/**
 * A builder to create an instance of {@link TransportClient}
 * This class pre-installs the {@link Netty3Plugin}, {@link ReindexPlugin}, {@link PercolatorPlugin}, and {@link MustachePlugin}
 * for the client. These plugins are all elasticsearch core modules that the client requires.
 */
@SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient {
    private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS = Collections.unmodifiableList(Arrays.asList(
            TransportPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class));

    @SafeVarargs
    public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
        this(settings, Arrays.asList(plugins));
    }

    public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
        super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
    }

    /**
     * The default transport implementation for the transport client.
     */
    public static final class TransportPlugin extends Netty3Plugin {
        // disable assertions for permissions since we might not have the permissions here
        // compared to if we are loaded as a real module to the es server
        public TransportPlugin(Settings settings) {
            super(Settings.builder().put("netty.assert.buglevel", false).put(settings).build());
        }
    }
}
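A sketch of how the pre-built client might be constructed and pointed at a node. The address and port are placeholders, exception handling is elided, and InetSocketTransportAddress is assumed to be the transport address implementation in use at this point in the codebase (it is not shown in this diff):

    try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
            .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300))) {
        // issue requests through the client here; the four core module plugins are already installed
    }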

@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.transport.client;

import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.Test;

import java.util.Arrays;

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

public class PreBuiltTransportClientTests extends RandomizedTest {

    @Test
    public void testPluginInstalled() {
        try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
            Settings settings = client.settings();
            assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
        }
    }

    @Test
    public void testInstallPluginTwice() {

        for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
                MustachePlugin.class)) {
            try {
                new PreBuiltTransportClient(Settings.EMPTY, plugin);
                fail("exception expected");
            } catch (IllegalArgumentException ex) {
                assertEquals("plugin is already installed", ex.getMessage());
            }
        }
    }
}

@@ -56,10 +56,10 @@ dependencies {
  compile "org.apache.lucene:lucene-spatial3d:${versions.lucene}"
  compile "org.apache.lucene:lucene-suggest:${versions.lucene}"

  compile 'org.elasticsearch:securesm:1.0'
  compile 'org.elasticsearch:securesm:1.1'

  // utilities
  compile 'net.sf.jopt-simple:jopt-simple:4.9'
  compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
  compile 'com.carrotsearch:hppc:0.7.1'

  // time handling, remove with java 8 time

@@ -74,8 +74,6 @@ dependencies {
  compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
  compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"

  // network stack
  compile 'io.netty:netty:3.10.6.Final'
  // percentiles aggregation
  compile 'com.tdunning:t-digest:3.0'
  // percentile ranks aggregation

@@ -152,26 +150,11 @@ processResources {
}

thirdPartyAudit.excludes = [
  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
  'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',

  // classes are missing!

  // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
  'com.fasterxml.jackson.databind.ObjectMapper',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
  'com.google.protobuf.CodedInputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
  'com.google.protobuf.CodedOutputStream',

  // from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
  'com.google.protobuf.ExtensionRegistry',
  'com.google.protobuf.MessageLite$Builder',
  'com.google.protobuf.MessageLite',
  'com.google.protobuf.Parser',

  // from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
  'javax.jms.Message',
  'javax.jms.MessageListener',

@@ -196,72 +179,8 @@ thirdPartyAudit.excludes = [
  'javax.mail.internet.MimeMessage',
  'javax.mail.internet.MimeMultipart',
  'javax.mail.internet.MimeUtility',

  // from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
  'javax.servlet.ServletConfig',
  'javax.servlet.ServletException',
  'javax.servlet.ServletOutputStream',
  'javax.servlet.http.HttpServlet',
  'javax.servlet.http.HttpServletRequest',
  'javax.servlet.http.HttpServletResponse',

  // from org.jboss.netty.logging.CommonsLoggerFactory (netty)
  'org.apache.commons.logging.Log',
  'org.apache.commons.logging.LogFactory',

  // from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
  'org.apache.tomcat.jni.Buffer',
  'org.apache.tomcat.jni.Library',
  'org.apache.tomcat.jni.Pool',
  'org.apache.tomcat.jni.SSL',
  'org.apache.tomcat.jni.SSLContext',

  // from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
  'org.bouncycastle.asn1.x500.X500Name',
  'org.bouncycastle.cert.X509v3CertificateBuilder',
  'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
  'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
  'org.bouncycastle.jce.provider.BouncyCastleProvider',
  'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',

  // from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
  'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
  'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
  'org.eclipse.jetty.npn.NextProtoNego',

  // from org.jboss.netty.logging.JBossLoggerFactory (netty)
  'org.jboss.logging.Logger',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
  'org.jboss.marshalling.ByteInput',

  // from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
  'org.jboss.marshalling.ByteOutput',

  // from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
  'org.jboss.marshalling.Marshaller',

  // from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
  'org.jboss.marshalling.MarshallerFactory',
  'org.jboss.marshalling.MarshallingConfiguration',
  'org.jboss.marshalling.Unmarshaller',

  // from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
  'org.noggit.JSONParser',

  // from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
  'org.osgi.framework.BundleActivator',
  'org.osgi.framework.BundleContext',

  // from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
  'org.osgi.framework.ServiceReference',
  'org.osgi.service.log.LogService',
  'org.osgi.util.tracker.ServiceTracker',
  'org.osgi.util.tracker.ServiceTrackerCustomizer',

  // from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
  'org.slf4j.Logger',
  'org.slf4j.LoggerFactory',
]

// dependency licenses are currently checked in distribution
@@ -27,6 +27,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;

@@ -71,6 +72,9 @@ public class CustomFieldQuery extends FieldQuery {
        } else if (sourceQuery instanceof BlendedTermQuery) {
            final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
            flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
        } else if (sourceQuery instanceof ToParentBlockJoinQuery) {
            ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;
            flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
        } else {
            super.flatten(sourceQuery, reader, flatQueries, boost);
        }

@@ -33,7 +33,6 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.transport.TcpTransport;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;

@@ -102,16 +101,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
    public ElasticsearchException(StreamInput in) throws IOException {
        super(in.readOptionalString(), in.readException());
        readStackTrace(this, in);
        int numKeys = in.readVInt();
        for (int i = 0; i < numKeys; i++) {
            final String key = in.readString();
            final int numValues = in.readVInt();
            final ArrayList<String> values = new ArrayList<>(numValues);
            for (int j = 0; j < numValues; j++) {
                values.add(in.readString());
            }
            headers.put(key, values);
        }
        headers.putAll(in.readMapOfLists());
    }

    /**

@@ -206,14 +196,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
        out.writeOptionalString(this.getMessage());
        out.writeException(this.getCause());
        writeStackTraces(this, out);
        out.writeVInt(headers.size());
        for (Map.Entry<String, List<String>> entry : headers.entrySet()) {
            out.writeString(entry.getKey());
            out.writeVInt(entry.getValue().size());
            for (String v : entry.getValue()) {
                out.writeString(v);
            }
        }
        out.writeMapOfLists(headers);
    }

    public static ElasticsearchException readException(StreamInput input, int id) throws IOException {

@@ -675,8 +658,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
        INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
                org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
        SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
                org.elasticsearch.script.Script.ScriptParseException::new, 124),
        // 124 used to be Script.ScriptParseException
        HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
                TcpTransport.HttpOnTransportException::new, 125),
        MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,

@@ -709,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
                org.elasticsearch.index.query.QueryShardException::new, 141),
        NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
                ShardStateAction.NoLongerPrimaryShardException::new, 142),
        SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
        SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),
        NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144);


        final Class<? extends ElasticsearchException> exceptionClass;

@@ -69,6 +69,8 @@ public class Version {
    public static final Version V_2_3_2 = new Version(V_2_3_2_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_3_ID = 2030399;
    public static final Version V_2_3_3 = new Version(V_2_3_3_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_2_3_4_ID = 2030499;
    public static final Version V_2_3_4 = new Version(V_2_3_4_ID, org.apache.lucene.util.Version.LUCENE_5_5_0);
    public static final int V_5_0_0_alpha1_ID = 5000001;
    public static final Version V_5_0_0_alpha1 = new Version(V_5_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_0_0);
    public static final int V_5_0_0_alpha2_ID = 5000002;

@@ -102,6 +104,8 @@ public class Version {
                return V_5_0_0_alpha2;
            case V_5_0_0_alpha1_ID:
                return V_5_0_0_alpha1;
            case V_2_3_4_ID:
                return V_2_3_4;
            case V_2_3_3_ID:
                return V_2_3_3;
            case V_2_3_2_ID:

@@ -19,6 +19,8 @@

package org.elasticsearch.action;

import java.util.function.Consumer;

/**
 * A listener for action responses or failures.
 */

@@ -33,4 +35,31 @@ public interface ActionListener<Response> {
     * A failure caused by an exception at some phase of the task.
     */
    void onFailure(Exception e);

    /**
     * Creates a listener that listens for a response (or failure) and executes the
     * corresponding consumer when the response (or failure) is received.
     *
     * @param onResponse the consumer of the response, when the listener receives one
     * @param onFailure the consumer of the failure, when the listener receives one
     * @param <Response> the type of the response
     * @return a listener that listens for responses and invokes the consumer when received
     */
    static <Response> ActionListener<Response> wrap(Consumer<Response> onResponse, Consumer<Exception> onFailure) {
        return new ActionListener<Response>() {
            @Override
            public void onResponse(Response response) {
                try {
                    onResponse.accept(response);
                } catch (Exception e) {
                    onFailure(e);
                }
            }

            @Override
            public void onFailure(Exception e) {
                onFailure.accept(e);
            }
        };
    }
}
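Because wrap takes two Consumers, it pairs naturally with lambdas; note from the implementation above that an exception thrown by the response consumer is routed to the failure consumer rather than escaping. A minimal sketch:

    ActionListener<String> listener = ActionListener.wrap(
            response -> System.out.println("received: " + response),
            e -> System.err.println("failed: " + e));
    listener.onResponse("hello");               // prints: received: hello
    listener.onFailure(new Exception("boom"));  // prints: failed: java.lang.Exception: boom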

@@ -20,10 +20,12 @@
package org.elasticsearch.action;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;

@@ -335,7 +337,8 @@ public class ActionModule extends AbstractModule {
        actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
        autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
        destructiveOperations = new DestructiveOperations(settings, clusterSettings);
        restController = new RestController(settings);
        Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
        restController = new RestController(settings, headers);
    }

    public Map<String, ActionHandler<?, ?>> getActions() {

@@ -52,6 +52,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
    private Integer shard;
    private Boolean primary;
    private boolean includeYesDecisions = false;
    private boolean includeDiskInfo = false;

    /** Explain the first unassigned shard */
    public ClusterAllocationExplainRequest() {

@@ -134,6 +135,16 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
        return this.includeYesDecisions;
    }

    /** {@code true} to include information about the gathered disk information of nodes in the cluster */
    public void includeDiskInfo(boolean includeDiskInfo) {
        this.includeDiskInfo = includeDiskInfo;
    }

    /** Returns true if information about disk usage and shard sizes should also be returned */
    public boolean includeDiskInfo() {
        return this.includeDiskInfo;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");

@@ -164,6 +175,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
        this.shard = in.readOptionalVInt();
        this.primary = in.readOptionalBoolean();
        this.includeYesDecisions = in.readBoolean();
        this.includeDiskInfo = in.readBoolean();
    }

    @Override

@@ -173,5 +185,6 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
        out.writeOptionalVInt(shard);
        out.writeOptionalBoolean(primary);
        out.writeBoolean(includeYesDecisions);
        out.writeBoolean(includeDiskInfo);
    }
}

@@ -53,6 +53,18 @@ public class ClusterAllocationExplainRequestBuilder
        return this;
    }

    /** Whether to include "YES" decider decisions in the response instead of only "NO" decisions */
    public ClusterAllocationExplainRequestBuilder setIncludeYesDecisions(boolean includeYesDecisions) {
        request.includeYesDecisions(includeYesDecisions);
        return this;
    }

    /** Whether to include information about the gathered disk information of nodes in the cluster */
    public ClusterAllocationExplainRequestBuilder setIncludeDiskInfo(boolean includeDiskInfo) {
        request.includeDiskInfo(includeDiskInfo);
        return this;
    }

    /**
     * Signal that the first unassigned shard should be used
     */
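Combining the new flag with the existing one, a fully spelled-out request using only the setters visible in the hunks above might look like:

    ClusterAllocationExplainRequest request = new ClusterAllocationExplainRequest();
    request.includeYesDecisions(true); // also report deciders that said YES, not just NO
    request.includeDiskInfo(true);     // ship the gathered ClusterInfo back with the explanation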

@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.allocation;

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;

@@ -48,10 +49,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
    private final long allocationDelayMillis;
    private final long remainingDelayMillis;
    private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
    private final ClusterInfo clusterInfo;

    public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
                                        long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
                                        Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
        this.shard = shard;
        this.primary = primary;
        this.hasPendingAsyncFetch = hasPendingAsyncFetch;

@@ -60,6 +62,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        this.allocationDelayMillis = allocationDelayMillis;
        this.remainingDelayMillis = remainingDelayMillis;
        this.nodeExplanations = nodeExplanations;
        this.clusterInfo = clusterInfo;
    }

    public ClusterAllocationExplanation(StreamInput in) throws IOException {

@@ -78,6 +81,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
            nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
        }
        this.nodeExplanations = nodeToExplanation;
        if (in.readBoolean()) {
            this.clusterInfo = new ClusterInfo(in);
        } else {
            this.clusterInfo = null;
        }
    }

    @Override

@@ -94,6 +102,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        for (NodeExplanation explanation : this.nodeExplanations.values()) {
            explanation.writeTo(out);
        }
        if (this.clusterInfo != null) {
            out.writeBoolean(true);
            this.clusterInfo.writeTo(out);
        } else {
            out.writeBoolean(false);
        }
    }

    /** Return the shard that the explanation is about */

@@ -143,6 +157,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
        return this.nodeExplanations;
    }

    /** Return the cluster disk info for the cluster or null if none available */
    @Nullable
    public ClusterInfo getClusterInfo() {
        return this.clusterInfo;
    }

    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(); {
            builder.startObject("shard"); {

@@ -164,11 +184,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
                builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
                builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
            }
            builder.startObject("nodes");
            for (NodeExplanation explanation : nodeExplanations.values()) {
                explanation.toXContent(builder, params);
            builder.startObject("nodes"); {
                for (NodeExplanation explanation : nodeExplanations.values()) {
                    explanation.toXContent(builder, params);
                }
            }
            builder.endObject(); // end nodes
            if (this.clusterInfo != null) {
                builder.startObject("cluster_info"); {
                    this.clusterInfo.toXContent(builder, params);
                }
                builder.endObject(); // end "cluster_info"
            }
        }
        builder.endObject(); // end wrapping object
        return builder;

@@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;

@@ -219,7 +220,7 @@ public class TransportClusterAllocationExplainAction
    public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
                                                            boolean includeYesDecisions, ShardsAllocator shardAllocator,
                                                            List<IndicesShardStoresResponse.StoreStatus> shardStores,
                                                            GatewayAllocator gatewayAllocator) {
                                                            GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
        // don't short circuit deciders, we want a full explanation
        allocation.debugDecision(true);
        // get the existing unassigned info if available

@@ -262,16 +263,17 @@ public class TransportClusterAllocationExplainAction
            explanations.put(node, nodeExplanation);
        }
        return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
                shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
                gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
                shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
                gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
    }

    @Override
    protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
                                   final ActionListener<ClusterAllocationExplainResponse> listener) {
        final RoutingNodes routingNodes = state.getRoutingNodes();
        final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
        final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
                clusterInfoService.getClusterInfo(), System.nanoTime(), false);
                clusterInfo, System.nanoTime(), false);

        ShardRouting foundShard = null;
        if (request.useAnyUnassignedShard()) {

@@ -318,7 +320,8 @@ public class TransportClusterAllocationExplainAction
                shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
        List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
        ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
                request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
                request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
                request.includeDiskInfo() ? clusterInfo : null);
        listener.onResponse(new ClusterAllocationExplainResponse(cae));
    }

@@ -128,6 +128,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
     * {@code group_by=nodes}.
     */
    public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
        //WTF is this? Why isn't this set by default;
        this.discoveryNodes = discoveryNodes;
    }
|
|||
*/
|
||||
package org.elasticsearch.action.admin.indices.analyze;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
|
@ -39,11 +47,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
|
|||
|
||||
private String analyzer;
|
||||
|
||||
private String tokenizer;
|
||||
private NameOrDefinition tokenizer;
|
||||
|
||||
private String[] tokenFilters = Strings.EMPTY_ARRAY;
|
||||
private final List<NameOrDefinition> tokenFilters = new ArrayList<>();
|
||||
|
||||
private String[] charFilters = Strings.EMPTY_ARRAY;
|
||||
private final List<NameOrDefinition> charFilters = new ArrayList<>();
|
||||
|
||||
private String field;
|
||||
|
||||
|
@ -51,6 +59,48 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
|
|||
|
||||
private String[] attributes = Strings.EMPTY_ARRAY;
|
||||
|
||||
public static class NameOrDefinition implements Writeable {
|
||||
// exactly one of these two members is not null
|
||||
public final String name;
|
||||
public final Settings definition;
|
||||
|
||||
NameOrDefinition(String name) {
|
||||
this.name = Objects.requireNonNull(name);
|
||||
this.definition = null;
|
||||
}
|
||||
|
||||
NameOrDefinition(Map<String, ?> definition) {
|
||||
this.name = null;
|
||||
Objects.requireNonNull(definition);
|
||||
try {
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
|
||||
builder.map(definition);
|
||||
this.definition = Settings.builder().loadFromSource(builder.string()).build();
|
||||
} catch (IOException e) {
|
||||
throw new IllegalArgumentException("Failed to parse [" + definition + "]", e);
|
||||
}
|
||||
}
|
||||
|
||||
NameOrDefinition(StreamInput in) throws IOException {
|
||||
name = in.readOptionalString();
if (in.readBoolean()) {
definition = Settings.readSettingsFromStream(in);
} else {
definition = null;
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeOptionalString(name);
boolean isNotNullDefinition = this.definition != null;
out.writeBoolean(isNotNullDefinition);
if (isNotNullDefinition) {
Settings.writeSettingsToStream(definition, out);
}
}
}

public AnalyzeRequest() {
}

@@ -82,35 +132,43 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
}

public AnalyzeRequest tokenizer(String tokenizer) {
this.tokenizer = tokenizer;
this.tokenizer = new NameOrDefinition(tokenizer);
return this;
}

public String tokenizer() {
public AnalyzeRequest tokenizer(Map<String, ?> tokenizer) {
this.tokenizer = new NameOrDefinition(tokenizer);
return this;
}

public NameOrDefinition tokenizer() {
return this.tokenizer;
}

public AnalyzeRequest tokenFilters(String... tokenFilters) {
if (tokenFilters == null) {
throw new IllegalArgumentException("token filters must not be null");
}
this.tokenFilters = tokenFilters;
public AnalyzeRequest addTokenFilter(String tokenFilter) {
this.tokenFilters.add(new NameOrDefinition(tokenFilter));
return this;
}

public String[] tokenFilters() {
public AnalyzeRequest addTokenFilter(Map<String, ?> tokenFilter) {
this.tokenFilters.add(new NameOrDefinition(tokenFilter));
return this;
}

public List<NameOrDefinition> tokenFilters() {
return this.tokenFilters;
}

public AnalyzeRequest charFilters(String... charFilters) {
if (charFilters == null) {
throw new IllegalArgumentException("char filters must not be null");
}
this.charFilters = charFilters;
public AnalyzeRequest addCharFilter(Map<String, ?> charFilter) {
this.charFilters.add(new NameOrDefinition(charFilter));
return this;
}

public String[] charFilters() {
public AnalyzeRequest addCharFilter(String charFilter) {
this.charFilters.add(new NameOrDefinition(charFilter));
return this;
}
public List<NameOrDefinition> charFilters() {
return this.charFilters;
}

@@ -158,14 +216,12 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
super.readFrom(in);
text = in.readStringArray();
analyzer = in.readOptionalString();
tokenizer = in.readOptionalString();
tokenFilters = in.readStringArray();
charFilters = in.readStringArray();
tokenizer = in.readOptionalWriteable(NameOrDefinition::new);
tokenFilters.addAll(in.readList(NameOrDefinition::new));
charFilters.addAll(in.readList(NameOrDefinition::new));
field = in.readOptionalString();
if (in.getVersion().onOrAfter(Version.V_2_2_0)) {
explain = in.readBoolean();
attributes = in.readStringArray();
}
explain = in.readBoolean();
attributes = in.readStringArray();
}

@Override

@@ -173,13 +229,11 @@ public class AnalyzeRequest extends SingleShardRequest<AnalyzeRequest> {
super.writeTo(out);
out.writeStringArray(text);
out.writeOptionalString(analyzer);
out.writeOptionalString(tokenizer);
out.writeStringArray(tokenFilters);
out.writeStringArray(charFilters);
out.writeOptionalWriteable(tokenizer);
out.writeList(tokenFilters);
out.writeList(charFilters);
out.writeOptionalString(field);
if (out.getVersion().onOrAfter(Version.V_2_2_0)) {
out.writeBoolean(explain);
out.writeStringArray(attributes);
}
out.writeBoolean(explain);
out.writeStringArray(attributes);
}
}

@@ -21,6 +21,8 @@ package org.elasticsearch.action.admin.indices.analyze;
import org.elasticsearch.action.support.single.shard.SingleShardOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;

import java.util.Map;

/**
*
*/

@@ -54,7 +56,7 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
}

/**
* Instead of setting the analyzer, sets the tokenizer that will be used as part of a custom
* Instead of setting the analyzer, sets the tokenizer as name that will be used as part of a custom
* analyzer.
*/
public AnalyzeRequestBuilder setTokenizer(String tokenizer) {

@@ -63,18 +65,43 @@ public class AnalyzeRequestBuilder extends SingleShardOperationRequestBuilder<An
}

/**
* Sets token filters that will be used on top of a tokenizer provided.
* Instead of setting the analyzer, sets the tokenizer using custom settings that will be used as part of a custom
* analyzer.
*/
public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
request.tokenFilters(tokenFilters);
public AnalyzeRequestBuilder setTokenizer(Map<String, ?> tokenizer) {
request.tokenizer(tokenizer);
return this;
}

/**
* Sets char filters that will be used before the tokenizer.
* Add token filter setting that will be used on top of a tokenizer provided.
*/
public AnalyzeRequestBuilder setCharFilters(String... charFilters) {
request.charFilters(charFilters);
public AnalyzeRequestBuilder addTokenFilter(Map<String, ?> tokenFilter) {
request.addTokenFilter(tokenFilter);
return this;
}

/**
* Add a name of token filter that will be used on top of a tokenizer provided.
*/
public AnalyzeRequestBuilder addTokenFilter(String tokenFilter) {
request.addTokenFilter(tokenFilter);
return this;
}

/**
* Add char filter setting that will be used on top of a tokenizer provided.
*/
public AnalyzeRequestBuilder addCharFilter(Map<String, ?> charFilter) {
request.addCharFilter(charFilter);
return this;
}

/**
* Add a name of char filter that will be used before the tokenizer.
*/
public AnalyzeRequestBuilder addCharFilter(String tokenFilter) {
request.addCharFilter(tokenFilter);
return this;
}
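
The two diffs above replace the string-based tokenizer/filter accessors with NameOrDefinition, so each analysis component can be referenced by registered name or defined inline from a Map. A minimal usage sketch, assuming a connected transport `client`; the ngram options are illustrative and not taken from this commit:

import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.client.Client;

import java.util.HashMap;
import java.util.Map;

class AnalyzeExample {
    static AnalyzeResponse analyze(Client client) {
        // anonymous tokenizer definition; "type" selects the registered provider
        Map<String, Object> tokenizer = new HashMap<>();
        tokenizer.put("type", "ngram");
        tokenizer.put("min_gram", 2);
        tokenizer.put("max_gram", 3);
        return client.admin().indices().prepareAnalyze("wonderful world")
                .setTokenizer(tokenizer)      // inline definition via Map
                .addTokenFilter("lowercase")  // built-in filter referenced by name
                .get();
    }
}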

@@ -25,23 +25,25 @@ import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.util.Attribute;
import org.apache.lucene.util.AttributeReflector;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.index.analysis.AnalysisService;
import org.elasticsearch.index.analysis.CharFilterFactory;

@@ -167,65 +169,13 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
}

} else if (request.tokenizer() != null) {
TokenizerFactory tokenizerFactory;
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(request.tokenizer());
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, request.tokenizer());
} else {
tokenizerFactory = analysisService.tokenizer(request.tokenizer());
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]");
}
}
TokenizerFactory tokenizerFactory = parseTokenizerFactory(request, analysisService, analysisRegistry, environment);

TokenFilterFactory[] tokenFilterFactories = new TokenFilterFactory[0];
if (request.tokenFilters() != null && request.tokenFilters().length > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().length];
for (int i = 0; i < request.tokenFilters().length; i++) {
String tokenFilterName = request.tokenFilters()[i];
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilterName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilterName);
} else {
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilterName);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]");
}
}
}
tokenFilterFactories = getTokenFilterFactories(request, analysisService, analysisRegistry, environment, tokenFilterFactories);

CharFilterFactory[] charFilterFactories = new CharFilterFactory[0];
if (request.charFilters() != null && request.charFilters().length > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().length];
for (int i = 0; i < request.charFilters().length; i++) {
String charFilterName = request.charFilters()[i];
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilterName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilterName);
} else {
charFilterFactories[i] = analysisService.charFilter(charFilterName);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]");
}
}
}
charFilterFactories = getCharFilterFactories(request, analysisService, analysisRegistry, environment, charFilterFactories);

analyzer = new CustomAnalyzer(tokenizerFactory, charFilterFactories, tokenFilterFactories);
closeAnalyzer = true;

@@ -407,8 +357,9 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
} catch (IOException e) {
throw new ElasticsearchException("failed to analyze (charFiltering)", e);
}
if (len > 0)
if (len > 0) {
sb.append(buf, 0, len);
}
} while (len == BUFFER_SIZE);
return sb.toString();
}

@@ -436,7 +387,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
lastPosition = lastPosition + increment;
}
tokens.add(new AnalyzeResponse.AnalyzeToken(term.toString(), lastPosition, lastOffset + offset.startOffset(),
lastOffset +offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));
lastOffset + offset.endOffset(), type.type(), extractExtendedAttributes(stream, includeAttributes)));

}
stream.end();

@@ -470,27 +421,164 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
private static Map<String, Object> extractExtendedAttributes(TokenStream stream, final Set<String> includeAttributes) {
final Map<String, Object> extendedAttributes = new TreeMap<>();

stream.reflectWith(new AttributeReflector() {
@Override
public void reflect(Class<? extends Attribute> attClass, String key, Object value) {
if (CharTermAttribute.class.isAssignableFrom(attClass))
return;
if (PositionIncrementAttribute.class.isAssignableFrom(attClass))
return;
if (OffsetAttribute.class.isAssignableFrom(attClass))
return;
if (TypeAttribute.class.isAssignableFrom(attClass))
return;
if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
extendedAttributes.put(key, value);
stream.reflectWith((attClass, key, value) -> {
if (CharTermAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (PositionIncrementAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (OffsetAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (TypeAttribute.class.isAssignableFrom(attClass)) {
return;
}
if (includeAttributes == null || includeAttributes.isEmpty() || includeAttributes.contains(key.toLowerCase(Locale.ROOT))) {
if (value instanceof BytesRef) {
final BytesRef p = (BytesRef) value;
value = p.toString();
}
extendedAttributes.put(key, value);
}
});

return extendedAttributes;
}

private static CharFilterFactory[] getCharFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
Environment environment, CharFilterFactory[] charFilterFactories) throws IOException {
if (request.charFilters() != null && request.charFilters().size() > 0) {
charFilterFactories = new CharFilterFactory[request.charFilters().size()];
for (int i = 0; i < request.charFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition charFilter = request.charFilters().get(i);
// parse anonymous settings
if (charFilter.definition != null) {
Settings settings = getAnonymousSettings(charFilter.definition);
String charFilterTypeName = settings.get("type");
if (charFilterTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous char filter: " + charFilter.definition);
}
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory =
analysisRegistry.getCharFilterProvider(charFilterTypeName);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilterTypeName + "]");
}
// Need to set anonymous "name" of char_filter
charFilterFactories[i] = charFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_charfilter_[" + i + "]", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<CharFilterFactory> charFilterFactoryFactory = analysisRegistry.getCharFilterProvider(charFilter.name);
if (charFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global char filter under [" + charFilter.name + "]");
}
charFilterFactories[i] = charFilterFactoryFactory.get(environment, charFilter.name);
} else {
charFilterFactories[i] = analysisService.charFilter(charFilter.name);
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
}
}
if (charFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find char filter under [" + charFilter.name + "]");
}
}
}
return charFilterFactories;
}

private static TokenFilterFactory[] getTokenFilterFactories(AnalyzeRequest request, AnalysisService analysisService, AnalysisRegistry analysisRegistry,
Environment environment, TokenFilterFactory[] tokenFilterFactories) throws IOException {
if (request.tokenFilters() != null && request.tokenFilters().size() > 0) {
tokenFilterFactories = new TokenFilterFactory[request.tokenFilters().size()];
for (int i = 0; i < request.tokenFilters().size(); i++) {
final AnalyzeRequest.NameOrDefinition tokenFilter = request.tokenFilters().get(i);
// parse anonymous settings
if (tokenFilter.definition != null) {
Settings settings = getAnonymousSettings(tokenFilter.definition);
String filterTypeName = settings.get("type");
if (filterTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous token filter: " + tokenFilter.definition);
}
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory =
analysisRegistry.getTokenFilterProvider(filterTypeName);
if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + filterTypeName + "]");
}
// Need to set anonymous "name" of tokenfilter
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenfilter_[" + i + "]", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenFilterFactory> tokenFilterFactoryFactory = analysisRegistry.getTokenFilterProvider(tokenFilter.name);

if (tokenFilterFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilter.name + "]");
}
tokenFilterFactories[i] = tokenFilterFactoryFactory.get(environment, tokenFilter.name);
} else {
tokenFilterFactories[i] = analysisService.tokenFilter(tokenFilter.name);
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find token filter under [" + tokenFilter.name + "]");
}
}
}
if (tokenFilterFactories[i] == null) {
throw new IllegalArgumentException("failed to find or create token filter under [" + tokenFilter.name + "]");
}
}
}
return tokenFilterFactories;
}

private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, AnalysisService analysisService,
AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
TokenizerFactory tokenizerFactory;
final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
// parse anonymous settings
if (tokenizer.definition != null) {
Settings settings = getAnonymousSettings(tokenizer.definition);
String tokenizerTypeName = settings.get("type");
if (tokenizerTypeName == null) {
throw new IllegalArgumentException("Missing [type] setting for anonymous tokenizer: " + tokenizer.definition);
}
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory =
analysisRegistry.getTokenizerProvider(tokenizerTypeName);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizerTypeName + "]");
}
// Need to set anonymous "name" of tokenizer
tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
} else {
if (analysisService == null) {
AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
if (tokenizerFactoryFactory == null) {
throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
}
tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
} else {
tokenizerFactory = analysisService.tokenizer(tokenizer.name);
if (tokenizerFactory == null) {
throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
}
}
}
return tokenizerFactory;
}

private static IndexSettings getNaIndexSettings(Settings settings) {
IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build();
return new IndexSettings(metaData, Settings.EMPTY);
}

private static Settings getAnonymousSettings(Settings providerSetting) {
return Settings.builder().put(providerSetting)
// for _na_
.put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())
.build();
}

}
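
The anonymous-component helpers above require the inline definition to carry a "type" key; getAnonymousSettings then pads the definition with the minimal `_na_` index settings (created version, shard and replica counts, a random UUID) so a factory can be instantiated outside any real index. A hedged sketch of a definition that satisfies that contract, assuming the 5.x Settings builder; the stop-filter options are illustrative:

import org.elasticsearch.common.settings.Settings;

class AnonymousDefinitionSketch {
    static Settings stopFilterDefinition() {
        return Settings.builder()
                .put("type", "stop")                     // mandatory: selects the provider
                .putArray("stopwords", "a", "an", "the") // component-specific option
                .build();
    }
}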

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -55,6 +56,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ

private final Set<ClusterBlock> blocks = new HashSet<>();

private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;


public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
this.originalMessage = originalMessage;

@@ -98,6 +101,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return this;
}

public CreateIndexClusterStateUpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}

public TransportMessage originalMessage() {
return originalMessage;
}

@@ -142,4 +150,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
public boolean updateAllTypes() {
return updateAllTypes;
}

public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
}

@@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -77,6 +78,8 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>

private boolean updateAllTypes = false;

private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;

public CreateIndexRequest() {
}

@@ -440,6 +443,30 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this;
}

public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}

/**
* Sets the number of shard copies that should be active for index creation to return.
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
* (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
* wait for all shards (primary and all replicas) to be active before returning.
* Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public CreateIndexRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}


@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@@ -462,6 +489,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
aliases.add(Alias.read(in));
}
updateAllTypes = in.readBoolean();
waitForActiveShards = ActiveShardCount.readFrom(in);
}

@Override

@@ -486,5 +514,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
alias.writeTo(out);
}
out.writeBoolean(updateAllTypes);
waitForActiveShards.writeTo(out);
}
}

@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -249,4 +250,23 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
request.updateAllTypes(updateAllTypes);
return this;
}

/**
* Sets the number of shard copies that should be active for index creation to return.
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
* (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
* wait for all shards (primary and all replicas) to be active before returning.
* Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public CreateIndexRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}
}
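
Both the request and the builder now expose the wait-for-active-shards knob. A hedged usage sketch, assuming an existing `Client client`; the index name and shard count are illustrative:

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.Client;

class CreateIndexExample {
    static void createAndCheck(Client client) {
        CreateIndexResponse response = client.admin().indices()
                .prepareCreate("logs-000001")
                .setWaitForActiveShards(ActiveShardCount.from(2)) // primary plus one replica
                .get();
        if (response.isAcknowledged() && response.isShardsAcked() == false) {
            // the index exists but the requested shard copies did not start in time
        }
    }
}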

@@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;

@@ -30,22 +31,41 @@ import java.io.IOException;
*/
public class CreateIndexResponse extends AcknowledgedResponse {

private boolean shardsAcked;

protected CreateIndexResponse() {
}

protected CreateIndexResponse(boolean acknowledged) {
protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked) {
super(acknowledged);
assert acknowledged || shardsAcked == false; // if its not acknowledged, then shards acked should be false too
this.shardsAcked = shardsAcked;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
shardsAcked = in.readBoolean();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
out.writeBoolean(shardsAcked);
}

/**
* Returns true if the requisite number of shards were started before
* returning from the index creation operation. If {@link #isAcknowledged()}
* is false, then this also returns false.
*/
public boolean isShardsAcked() {
return shardsAcked;
}

public void addCustomFields(XContentBuilder builder) throws IOException {
builder.field("shards_acknowledged", isShardsAcked());
}
}

@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;

@@ -31,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;


@@ -77,24 +75,12 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.updateAllTypes())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs());
.aliases(request.aliases()).customs(request.customs())
.waitForActiveShards(request.waitForActiveShards());

createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {

@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new CreateIndexResponse(response.isAcknowledged()));
}

@Override
public void onFailure(Exception t) {
if (t instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", t, request.index());
} else {
logger.debug("[{}] failed to create", t, request.index());
}
listener.onFailure(t);
}
});
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked())),
listener::onFailure));
}

}
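
This is the first of several places where the commit collapses anonymous ActionListener classes into ActionListener.wrap, which adapts an onResponse lambda and an onFailure lambda into a listener. A minimal sketch of the pattern; the String payload and print statements are illustrative:

import org.elasticsearch.action.ActionListener;

class ListenerSketch {
    static ActionListener<String> listener() {
        return ActionListener.wrap(
                response -> System.out.println("got " + response), // onResponse
                e -> System.err.println("failed: " + e));          // onFailure
    }
}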

@@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;

@@ -206,4 +207,22 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem
}
}

/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.createIndexRequest.waitForActiveShards(waitForActiveShards);
}

}

@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.rollover;

import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;

@@ -70,4 +71,23 @@ public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder<Ro
this.request.getCreateIndexRequest().mapping(type, source);
return this;
}

/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public RolloverRequestBuilder waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.request.setWaitForActiveShards(waitForActiveShards);
return this;
}
}

@@ -39,22 +39,28 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
private static final String DRY_RUN = "dry_run";
private static final String ROLLED_OVER = "rolled_over";
private static final String CONDITIONS = "conditions";
private static final String ACKNOWLEDGED = "acknowledged";
private static final String SHARDS_ACKED = "shards_acknowledged";

private String oldIndex;
private String newIndex;
private Set<Map.Entry<String, Boolean>> conditionStatus;
private boolean dryRun;
private boolean rolledOver;
private boolean acknowledged;
private boolean shardsAcked;

RolloverResponse() {
}

RolloverResponse(String oldIndex, String newIndex, Set<Condition.Result> conditionResults,
boolean dryRun, boolean rolledOver) {
boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcked) {
this.oldIndex = oldIndex;
this.newIndex = newIndex;
this.dryRun = dryRun;
this.rolledOver = rolledOver;
this.acknowledged = acknowledged;
this.shardsAcked = shardsAcked;
this.conditionStatus = conditionResults.stream()
.map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched))
.collect(Collectors.toSet());

@@ -89,12 +95,31 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
}

/**
* Returns if the rollover was not simulated and the conditions were met
* Returns true if the rollover was not simulated and the conditions were met
*/
public boolean isRolledOver() {
return rolledOver;
}

/**
* Returns true if the creation of the new rollover index and switching of the
* alias to the newly created index was successful, and returns false otherwise.
* If {@link #isDryRun()} is true, then this will also return false. If this
* returns false, then {@link #isShardsAcked()} will also return false.
*/
public boolean isAcknowledged() {
return acknowledged;
}

/**
* Returns true if the requisite number of shards were started in the newly
* created rollover index before returning. If {@link #isAcknowledged()} is
* false, then this will also return false.
*/
public boolean isShardsAcked() {
return shardsAcked;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);

@@ -110,6 +135,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
conditionStatus = conditions;
dryRun = in.readBoolean();
rolledOver = in.readBoolean();
acknowledged = in.readBoolean();
shardsAcked = in.readBoolean();
}

@Override

@@ -124,6 +151,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
}
out.writeBoolean(dryRun);
out.writeBoolean(rolledOver);
out.writeBoolean(acknowledged);
out.writeBoolean(shardsAcked);
}

@Override

@@ -132,6 +161,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
builder.field(NEW_INDEX, newIndex);
builder.field(ROLLED_OVER, rolledOver);
builder.field(DRY_RUN, dryRun);
builder.field(ACKNOWLEDGED, acknowledged);
builder.field(SHARDS_ACKED, shardsAcked);
builder.startObject(CONDITIONS);
for (Map.Entry<String, Boolean> entry : conditionStatus) {
builder.field(entry.getKey(), entry.getValue());
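
A hedged sketch of reading the enriched rollover response, assuming an existing `Client client` and a write alias named "logs_write"; both names are illustrative:

import org.elasticsearch.action.admin.indices.rollover.RolloverResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.Client;

class RolloverExample {
    static void rollover(Client client) {
        RolloverResponse response = client.admin().indices()
                .prepareRolloverIndex("logs_write")        // alias to roll over
                .waitForActiveShards(ActiveShardCount.ALL) // knob added by this commit
                .get();
        if (response.isRolledOver() && response.isShardsAcked() == false) {
            // the new index exists and the alias switched, but not every
            // shard copy started before the timeout
        }
    }
}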

@@ -25,11 +25,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpda
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.ActiveShardsObserver;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.AliasAction;

@@ -58,6 +59,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-(\\d)+$");
private final MetaDataCreateIndexService createIndexService;
private final MetaDataIndexAliasesService indexAliasesService;
private final ActiveShardsObserver activeShardsObserver;
private final Client client;

@Inject

@@ -70,6 +72,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
this.createIndexService = createIndexService;
this.indexAliasesService = indexAliasesService;
this.client = client;
this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
}

@Override

@@ -110,42 +113,34 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
: generateRolloverIndexName(sourceIndexName);
if (rolloverRequest.isDryRun()) {
listener.onResponse(
new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false));
new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false));
return;
}
if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
createIndexService.createIndex(prepareCreateIndexRequest(rolloverIndexName, rolloverRequest),
new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
// switch the alias to point to the newly created index
indexAliasesService.indicesAliases(
prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
rolloverRequest),
new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
listener.onResponse(
new RolloverResponse(sourceIndexName, rolloverIndexName,
conditionResults, false, true));
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}

@Override
public void onFailure(Exception t) {
listener.onFailure(t);
}
});
CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(rolloverIndexName, rolloverRequest);
createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
// switch the alias to point to the newly created index
indexAliasesService.indicesAliases(
prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
rolloverRequest),
ActionListener.wrap(aliasClusterStateUpdateResponse -> {
if (aliasClusterStateUpdateResponse.isAcknowledged()) {
activeShardsObserver.waitForActiveShards(rolloverIndexName,
rolloverRequest.getCreateIndexRequest().waitForActiveShards(),
rolloverRequest.masterNodeTimeout(),
isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName,
conditionResults, false, true, true, isShardsAcked)),
listener::onFailure);
} else {
listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults,
false, true, false, false));
}
}, listener::onFailure));
}, listener::onFailure));
} else {
// conditions not met
listener.onResponse(
new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false)
new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false, false, false)
);
}
}

@@ -216,6 +211,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
.masterNodeTimeout(createIndexRequest.masterNodeTimeout())
.settings(createIndexRequest.settings())
.aliases(createIndexRequest.aliases())
.waitForActiveShards(ActiveShardCount.NONE) // not waiting for shards here, will wait on the alias switch operation
.mappings(createIndexRequest.mappings());
}


@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterShardHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

@@ -93,12 +94,14 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
logger.trace("using cluster state version [{}] to determine shards", state.version());
// collect relevant shard ids of the requested indices for fetching store infos
for (String index : concreteIndices) {
IndexMetaData indexMetaData = state.metaData().index(index);
IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
if (indexShardRoutingTables == null) {
continue;
}
for (IndexShardRoutingTable routing : indexShardRoutingTables) {
ClusterShardHealth shardHealth = new ClusterShardHealth(routing.shardId().id(), routing);
final int shardId = routing.shardId().id();
ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, indexMetaData);
if (request.shardStatuses().contains(shardHealth.getStatus())) {
shardIdsToFetch.add(routing.shardId());
}

@@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;

@@ -36,7 +37,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.action.ValidateActions.addValidationError;

@@ -126,6 +126,24 @@ public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements
return sourceIndex;
}

/**
* Sets the number of shard copies that should be active for creation of the
* new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.getShrinkIndexRequest().waitForActiveShards(waitForActiveShards);
}

public void source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {

@@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.shrink;

import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;

@@ -44,4 +45,23 @@ public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder<ShrinkReque
this.request.getShrinkIndexRequest().settings(settings);
return this;
}

/**
* Sets the number of shard copies that should be active for creation of the
* new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired amount of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public ShrinkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.request.setWaitForActiveShards(waitForActiveShards);
return this;
}
}
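
A hedged sketch of the same gate applied to shrink, assuming an existing `Client client`; "source" and "target" are illustrative index names:

import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.client.Client;

class ShrinkExample {
    static void shrink(Client client) {
        ShrinkResponse response = client.admin().indices()
                .prepareShrinkIndex("source", "target")
                .setWaitForActiveShards(ActiveShardCount.ALL) // every copy must start
                .get();
        boolean allCopiesStarted = response.isShardsAcked();
    }
}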

@@ -25,7 +25,7 @@ public final class ShrinkResponse extends CreateIndexResponse {
ShrinkResponse() {
}

ShrinkResponse(boolean acknowledged) {
super(acknowledged);
ShrinkResponse(boolean acknowledged, boolean shardsAcked) {
super(acknowledged, shardsAcked);
}
}

@@ -29,7 +29,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;

@@ -40,7 +39,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;


@@ -93,22 +91,8 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
return shard == null ? null : shard.getPrimary().getDocs();
}, indexNameExpressionResolver);
createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new ShrinkResponse(response.isAcknowledged()));
}

@Override
public void onFailure(Exception t) {
if (t instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create shrink index", t, updateRequest.index());
} else {
logger.debug("[{}] failed to create shrink index", t, updateRequest.index());
}
listener.onFailure(t);
}
});
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked())), listener::onFailure));
}

@Override

@@ -162,6 +146,7 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
.settings(targetIndex.settings())
.aliases(targetIndex.aliases())
.customs(targetIndex.customs())
.waitForActiveShards(targetIndex.waitForActiveShards())
.shrinkFrom(metaData.getIndex());
}


@@ -119,29 +119,21 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul

if (needToCheck()) {
// Keep track of all unique indices and all unique types per index for the create index requests:
final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
final Set<String> autoCreateIndices = new HashSet<>();
for (ActionRequest request : bulkRequest.requests) {
if (request instanceof DocumentRequest) {
DocumentRequest req = (DocumentRequest) request;
Set<String> types = indicesAndTypes.get(req.index());
if (types == null) {
indicesAndTypes.put(req.index(), types = new HashSet<>());
}
types.add(req.type());
autoCreateIndices.add(req.index());
} else {
throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName());
}
}
final AtomicInteger counter = new AtomicInteger(indicesAndTypes.size());
final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
ClusterState state = clusterService.state();
for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
final String index = entry.getKey();
for (String index : autoCreateIndices) {
if (shouldAutoCreate(index, state)) {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
createIndexRequest.index(index);
for (String type : entry.getValue()) {
createIndexRequest.mapping(type);
}
createIndexRequest.cause("auto(bulk api)");
createIndexRequest.masterNodeTimeout(bulkRequest.timeout());
createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {
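
The bulk path above now dedupes only index names and no longer seeds per-type mappings into the auto-create request. A self-contained sketch of the new bookkeeping; IndexNameSource is a hypothetical stand-in for the diff's DocumentRequest, of which only index() matters here:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class AutoCreateSketch {
    interface IndexNameSource {
        String index();
    }

    static Set<String> indicesToAutoCreate(List<IndexNameSource> requests) {
        Set<String> autoCreateIndices = new HashSet<>();
        for (IndexNameSource req : requests) {
            autoCreateIndices.add(req.index()); // one create-index call per distinct index
        }
        return autoCreateIndices;
    }
}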

@@ -68,6 +68,23 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
return this.type;
}

public String getDisplayType() {
switch (type) {
case 0:
return "integer";
case 1:
return "float";
case 2:
return "date";
case 3:
return "string";
case 4:
return "ip";
default:
throw new IllegalArgumentException("Unknown type.");
}
}

/**
* @return the total number of documents.
*

@@ -220,23 +237,24 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(Fields.MAX_DOC, maxDoc);
builder.field(Fields.DOC_COUNT, docCount);
builder.field(Fields.DENSITY, getDensity());
builder.field(Fields.SUM_DOC_FREQ, sumDocFreq);
builder.field(Fields.SUM_TOTAL_TERM_FREQ, sumTotalTermFreq);
builder.field(Fields.SEARCHABLE, isSearchable);
builder.field(Fields.AGGREGATABLE, isAggregatable);
builder.field(TYPE_FIELD, getDisplayType());
builder.field(MAX_DOC_FIELD, maxDoc);
builder.field(DOC_COUNT_FIELD, docCount);
builder.field(DENSITY_FIELD, getDensity());
builder.field(SUM_DOC_FREQ_FIELD, sumDocFreq);
builder.field(SUM_TOTAL_TERM_FREQ_FIELD, sumTotalTermFreq);
builder.field(SEARCHABLE_FIELD, isSearchable);
builder.field(AGGREGATABLE_FIELD, isAggregatable);
toInnerXContent(builder);
builder.endObject();
return builder;
}

protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(Fields.MIN_VALUE, getMinValue());
builder.field(Fields.MIN_VALUE_AS_STRING, getMinValueAsString());
builder.field(Fields.MAX_VALUE, getMaxValue());
builder.field(Fields.MAX_VALUE_AS_STRING, getMaxValueAsString());
builder.field(MIN_VALUE_FIELD, getMinValue());
builder.field(MIN_VALUE_AS_STRING_FIELD, getMinValueAsString());
builder.field(MAX_VALUE_FIELD, getMaxValue());
builder.field(MAX_VALUE_AS_STRING_FIELD, getMaxValueAsString());
}

@Override

@@ -484,8 +502,8 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

@Override
protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(Fields.MIN_VALUE, getMinValueAsString());
builder.field(Fields.MAX_VALUE, getMaxValueAsString());
builder.field(MIN_VALUE_FIELD, getMinValueAsString());
builder.field(MAX_VALUE_FIELD, getMaxValueAsString());
}
}

@@ -598,34 +616,16 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
}
}

public static String typeName(byte type) {
switch (type) {
case 0:
return "whole-number";
case 1:
return "floating-point";
case 2:
return "date";
case 3:
return "text";
case 4:
return "ip";
default:
throw new IllegalArgumentException("Unknown type.");
}
}

private static final class Fields {
static final String MAX_DOC = new String("max_doc");
static final String DOC_COUNT = new String("doc_count");
static final String DENSITY = new String("density");
static final String SUM_DOC_FREQ = new String("sum_doc_freq");
static final String SUM_TOTAL_TERM_FREQ = new String("sum_total_term_freq");
static final String SEARCHABLE = new String("searchable");
static final String AGGREGATABLE = new String("aggregatable");
static final String MIN_VALUE = new String("min_value");
static final String MIN_VALUE_AS_STRING = new String("min_value_as_string");
static final String MAX_VALUE = new String("max_value");
static final String MAX_VALUE_AS_STRING = new String("max_value_as_string");
}
static final String TYPE_FIELD = new String("type");
static final String MAX_DOC_FIELD = new String("max_doc");
static final String DOC_COUNT_FIELD = new String("doc_count");
static final String DENSITY_FIELD = new String("density");
static final String SUM_DOC_FREQ_FIELD = new String("sum_doc_freq");
static final String SUM_TOTAL_TERM_FREQ_FIELD = new String("sum_total_term_freq");
static final String SEARCHABLE_FIELD = new String("searchable");
static final String AGGREGATABLE_FIELD = new String("aggregatable");
static final String MIN_VALUE_FIELD = new String("min_value");
static final String MIN_VALUE_AS_STRING_FIELD = new String("min_value_as_string");
static final String MAX_VALUE_FIELD = new String("max_value");
static final String MAX_VALUE_AS_STRING_FIELD = new String("max_value_as_string");
}
|
|
|
@@ -116,9 +116,9 @@ public class TransportFieldStatsAction extends
            Arrays.sort(fields, (o1, o2) -> Byte.compare(o1.getType(), o2.getType()));
            conflicts.put(entry.getKey(),
                "Field [" + entry.getKey() + "] of type [" +
                    FieldStats.typeName(fields[0].getType()) +
                    fields[0].getDisplayType() +
                    "] conflicts with existing field of type [" +
                    FieldStats.typeName(fields[1].getType()) +
                    fields[1].getDisplayType() +
                    "] in other index.");
            }
        } else {
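getDisplayType() replaces the static typeName(byte) helper, so field-stats conflict messages are now built from the instance's own type code. A minimal standalone sketch of the mapping and the message it yields (DisplayTypeDemo and the sample values are illustrative, not part of this commit):

    public final class DisplayTypeDemo {
        // Same byte-code-to-name mapping as getDisplayType() above.
        static String displayType(byte type) {
            switch (type) {
                case 0: return "integer";
                case 1: return "float";
                case 2: return "date";
                case 3: return "string";
                case 4: return "ip";
                default: throw new IllegalArgumentException("Unknown type.");
            }
        }

        public static void main(String[] args) {
            // Two indices disagreeing on the type of "price":
            System.out.println("Field [price] of type [" + displayType((byte) 0)
                + "] conflicts with existing field of type [" + displayType((byte) 3)
                + "] in other index.");
        }
    }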
@@ -91,7 +91,6 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
        if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
            CreateIndexRequest createIndexRequest = new CreateIndexRequest();
            createIndexRequest.index(request.index());
            createIndexRequest.mapping(request.type());
            createIndexRequest.cause("auto(index api)");
            createIndexRequest.masterNodeTimeout(request.timeout());
            createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {
@@ -19,6 +19,7 @@
package org.elasticsearch.action.ingest;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

@@ -29,47 +30,48 @@ import org.elasticsearch.ingest.IngestDocument;

import java.io.IOException;

public class SimulateProcessorResult implements Writeable, ToXContent {
class SimulateProcessorResult implements Writeable, ToXContent {
    private final String processorTag;
    private final WriteableIngestDocument ingestDocument;
    private final Exception failure;

    public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
    SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) {
        this.processorTag = processorTag;
        this.ingestDocument = new WriteableIngestDocument(ingestDocument);
        this.failure = null;
        this.ingestDocument = (ingestDocument == null) ? null : new WriteableIngestDocument(ingestDocument);
        this.failure = failure;
    }

    public SimulateProcessorResult(String processorTag, Exception failure) {
        this.processorTag = processorTag;
        this.failure = failure;
        this.ingestDocument = null;
    SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
        this(processorTag, ingestDocument, null);
    }

    SimulateProcessorResult(String processorTag, Exception failure) {
        this(processorTag, null, failure);
    }

    /**
     * Read from a stream.
     */
    public SimulateProcessorResult(StreamInput in) throws IOException {
    SimulateProcessorResult(StreamInput in) throws IOException {
        this.processorTag = in.readString();
        if (in.readBoolean()) {
            this.failure = in.readException();
            this.ingestDocument = null;
        } else {
            this.ingestDocument = new WriteableIngestDocument(in);
            this.failure = null;
        } else {
            this.ingestDocument = null;
        }
        this.failure = in.readException();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(processorTag);
        if (failure == null) {
        if (ingestDocument == null) {
            out.writeBoolean(false);
            ingestDocument.writeTo(out);
        } else {
            out.writeBoolean(true);
            out.writeException(failure);
            ingestDocument.writeTo(out);
        }
        out.writeException(failure);
    }

    public IngestDocument getIngestDocument() {

@@ -90,14 +92,23 @@ public class SimulateProcessorResult implements Writeable, ToXContent {
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();

        if (processorTag != null) {
            builder.field(ConfigurationUtils.TAG_KEY, processorTag);
        }
        if (failure == null) {
            ingestDocument.toXContent(builder, params);
        } else {

        if (failure != null && ingestDocument != null) {
            builder.startObject("ignored_error");
            ElasticsearchException.renderException(builder, params, failure);
            builder.endObject();
        } else if (failure != null) {
            ElasticsearchException.renderException(builder, params, failure);
        }

        if (ingestDocument != null) {
            ingestDocument.toXContent(builder, params);
        }

        builder.endObject();
        return builder;
    }
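The two convenience constructors now delegate to a single canonical constructor that owns the null handling, which is what lets a document and a failure coexist for the new ignored_error rendering. A sketch of the same telescoping pattern with simplified stand-in types (Result and its String document are illustrative):

    final class Result {
        private final String tag;
        private final String document;   // stands in for WriteableIngestDocument
        private final Exception failure;

        // Canonical constructor: both document and failure may be null.
        Result(String tag, String document, Exception failure) {
            this.tag = tag;
            this.document = document;
            this.failure = failure;
        }

        // Convenience constructors delegate instead of duplicating assignments.
        Result(String tag, String document) { this(tag, document, null); }

        Result(String tag, Exception failure) { this(tag, null, failure); }
    }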
@@ -24,7 +24,6 @@ import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.Processor;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**

@@ -49,7 +48,7 @@ public final class TrackingResultProcessor implements Processor {
            processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
        } catch (Exception e) {
            if (ignoreFailure) {
                processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
                processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument), e));
            } else {
                processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e));
            }
@@ -0,0 +1,211 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support;

import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

/**
 * A class whose instances represent a value for counting the number
 * of active shard copies for a given shard in an index.
 */
public final class ActiveShardCount implements Writeable {

    private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2;
    private static final int ALL_ACTIVE_SHARDS = -1;

    public static final ActiveShardCount DEFAULT = new ActiveShardCount(ACTIVE_SHARD_COUNT_DEFAULT);
    public static final ActiveShardCount ALL = new ActiveShardCount(ALL_ACTIVE_SHARDS);
    public static final ActiveShardCount NONE = new ActiveShardCount(0);
    public static final ActiveShardCount ONE = new ActiveShardCount(1);

    private final int value;

    private ActiveShardCount(final int value) {
        this.value = value;
    }

    /**
     * Get an ActiveShardCount instance for the given value. The value is first validated to ensure
     * it is a valid shard count and throws an IllegalArgumentException if validation fails. Valid
     * values are any non-negative number. Directly use {@link ActiveShardCount#DEFAULT} for the
     * default value (which is one shard copy) or {@link ActiveShardCount#ALL} to specify all the shards.
     */
    public static ActiveShardCount from(final int value) {
        if (value < 0) {
            throw new IllegalArgumentException("shard count cannot be a negative value");
        }
        return get(value);
    }

    private static ActiveShardCount get(final int value) {
        switch (validateValue(value)) {
            case ACTIVE_SHARD_COUNT_DEFAULT:
                return DEFAULT;
            case ALL_ACTIVE_SHARDS:
                return ALL;
            case 1:
                return ONE;
            case 0:
                return NONE;
            default:
                return new ActiveShardCount(value);
        }
    }

    @Override
    public void writeTo(final StreamOutput out) throws IOException {
        out.writeInt(value);
    }

    public static ActiveShardCount readFrom(final StreamInput in) throws IOException {
        return get(in.readInt());
    }

    private static int validateValue(final int value) {
        if (value < 0 && value != ACTIVE_SHARD_COUNT_DEFAULT && value != ALL_ACTIVE_SHARDS) {
            throw new IllegalArgumentException("Invalid ActiveShardCount[" + value + "]");
        }
        return value;
    }

    /**
     * Resolve this instance to an actual integer value for the number of active shard counts.
     * If {@link ActiveShardCount#ALL} is specified, then the given {@link IndexMetaData} is
     * used to determine what the actual active shard count should be. The default value indicates
     * one active shard.
     */
    public int resolve(final IndexMetaData indexMetaData) {
        if (this == ActiveShardCount.DEFAULT) {
            return 1;
        } else if (this == ActiveShardCount.ALL) {
            return indexMetaData.getNumberOfReplicas() + 1;
        } else {
            return value;
        }
    }

    /**
     * Parses the active shard count from the given string. Valid values are "all" for
     * all shard copies, null for the default value (which defaults to one shard copy),
     * or a numeric value greater than or equal to 0. Any other input will throw an
     * IllegalArgumentException.
     */
    public static ActiveShardCount parseString(final String str) {
        if (str == null) {
            return ActiveShardCount.DEFAULT;
        } else if (str.equals("all")) {
            return ActiveShardCount.ALL;
        } else {
            int val;
            try {
                val = Integer.parseInt(str);
            } catch (NumberFormatException e) {
                throw new IllegalArgumentException("cannot parse ActiveShardCount[" + str + "]", e);
            }
            return ActiveShardCount.from(val);
        }
    }

    /**
     * Returns true iff the given cluster state's routing table contains enough active
     * shards to meet the required shard count represented by this instance.
     */
    public boolean enoughShardsActive(final ClusterState clusterState, final String indexName) {
        if (this == ActiveShardCount.NONE) {
            // not waiting for any active shards
            return true;
        }
        final IndexMetaData indexMetaData = clusterState.metaData().index(indexName);
        if (indexMetaData == null) {
            // it's possible the index was deleted while waiting for active shard copies,
            // in this case, we'll just consider it that we have enough active shard copies
            // and we can stop waiting
            return true;
        }
        final IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexName);
        assert indexRoutingTable != null;
        if (indexRoutingTable.allPrimaryShardsActive() == false) {
            // all primary shards aren't active yet
            return false;
        }
        for (final IntObjectCursor<IndexShardRoutingTable> shardRouting : indexRoutingTable.getShards()) {
            if (enoughShardsActive(shardRouting.value, indexMetaData) == false) {
                // not enough active shard copies yet
                return false;
            }
        }
        return true;
    }

    /**
     * Returns true iff the active shard count in the shard routing table is enough
     * to meet the required shard count represented by this instance.
     */
    public boolean enoughShardsActive(final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
        if (shardRoutingTable.activeShards().size() < resolve(indexMetaData)) {
            // not enough active shard copies yet
            return false;
        }
        return true;
    }

    @Override
    public int hashCode() {
        return Integer.hashCode(value);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        @SuppressWarnings("unchecked") ActiveShardCount that = (ActiveShardCount) o;
        return value == that.value;
    }

    @Override
    public String toString() {
        final String valStr;
        switch (value) {
            case ALL_ACTIVE_SHARDS:
                valStr = "ALL";
                break;
            case ACTIVE_SHARD_COUNT_DEFAULT:
                valStr = "DEFAULT";
                break;
            default:
                valStr = Integer.toString(value);
        }
        return "ActiveShardCount[" + valStr + "]";
    }

}
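A short usage sketch of the parsing and validation rules above (ActiveShardCountDemo is illustrative, not part of this commit):

    import org.elasticsearch.action.support.ActiveShardCount;

    public final class ActiveShardCountDemo {
        public static void main(String[] args) {
            System.out.println(ActiveShardCount.parseString("all"));  // ActiveShardCount[ALL]
            System.out.println(ActiveShardCount.parseString(null));   // ActiveShardCount[DEFAULT], resolves to 1
            System.out.println(ActiveShardCount.from(2));             // ActiveShardCount[2]
            try {
                ActiveShardCount.parseString("-3");  // parses as an int, then fails validation in from()
            } catch (IllegalArgumentException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }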
@@ -0,0 +1,105 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.support;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.function.Consumer;

/**
 * This class provides primitives for waiting for a configured number of shards
 * to become active before sending a response on an {@link ActionListener}.
 */
public class ActiveShardsObserver extends AbstractComponent {

    private final ClusterService clusterService;
    private final ThreadPool threadPool;

    public ActiveShardsObserver(final Settings settings, final ClusterService clusterService, final ThreadPool threadPool) {
        super(settings);
        this.clusterService = clusterService;
        this.threadPool = threadPool;
    }

    /**
     * Waits on the specified number of active shards to be started before executing the result consumers.
     *
     * @param indexName the index to wait for active shards on
     * @param activeShardCount the number of active shards to wait on before returning
     * @param timeout the timeout value
     * @param onResult a function that is executed in response to the requisite shards becoming active or a timeout (whichever comes first)
     * @param onFailure a function that is executed in response to an error occurring during waiting for the active shards
     */
    public void waitForActiveShards(final String indexName,
                                    final ActiveShardCount activeShardCount,
                                    final TimeValue timeout,
                                    final Consumer<Boolean> onResult,
                                    final Consumer<Exception> onFailure) {

        // wait for the configured number of active shards to be allocated before executing the result consumer
        if (activeShardCount == ActiveShardCount.NONE) {
            // not waiting, so just run whatever we were to run when the waiting is done
            onResult.accept(true);
            return;
        }

        final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
        if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) {
            onResult.accept(true);
        } else {
            final ClusterStateObserver.ChangePredicate shardsAllocatedPredicate =
                new ClusterStateObserver.ValidationPredicate() {
                    @Override
                    protected boolean validate(final ClusterState newState) {
                        return activeShardCount.enoughShardsActive(newState, indexName);
                    }
                };

            final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {
                @Override
                public void onNewClusterState(ClusterState state) {
                    onResult.accept(true);
                }

                @Override
                public void onClusterServiceClose() {
                    logger.debug("[{}] cluster service closed while waiting for enough shards to be started.", indexName);
                    onFailure.accept(new NodeClosedException(clusterService.localNode()));
                }

                @Override
                public void onTimeout(TimeValue timeout) {
                    onResult.accept(false);
                }
            };

            observer.waitForNextChange(observerListener, shardsAllocatedPredicate, timeout);
        }
    }

}
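A hedged sketch of a call site (the surrounding method and response handling are hypothetical; ActiveShardsObserver, ActiveShardCount, TimeValue and ActionListener are the real types):

    // Wait for two active copies of each shard of "my-index" for up to 30 seconds.
    void waitThenRespond(ActiveShardsObserver observer, ActionListener<Boolean> listener) {
        observer.waitForActiveShards("my-index",
            ActiveShardCount.from(2),
            TimeValue.timeValueSeconds(30),
            shardsAcked -> listener.onResponse(shardsAcked),  // false means the timeout elapsed
            listener::onFailure);                             // e.g. node closed while waiting
    }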
@@ -59,7 +59,7 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque

    @Override
    public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
        // We already got the task created on the netty layer - no need to create it again on the transport layer
        // We already got the task created on the network layer - no need to create it again on the transport layer
        execute(task, request, new ActionListener<Response>() {
            @Override
            public void onResponse(Response response) {
@@ -101,6 +101,10 @@ public abstract class TransportAction<Request extends ActionRequest<Request>, Re
        return task;
    }

    /**
     * Execute the transport action on the local node, returning the {@link Task} used to track its execution and accepting a
     * {@link TaskListener} which listens for the completion of the action.
     */
    public final Task execute(Request request, TaskListener<Response> listener) {
        Task task = taskManager.register("transport", actionName, request);
        execute(task, request, new ActionListener<Response>() {
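A hypothetical caller of the new overload, keeping the Task handle while listening for completion (the logger calls are illustrative; the TaskListener callback signatures are assumed from this codebase):

    Task task = execute(request, new TaskListener<Response>() {
        @Override
        public void onResponse(Task task, Response response) {
            logger.debug("task [{}] completed", task.getId());
        }

        @Override
        public void onFailure(Task task, Throwable e) {
            logger.debug("task [{}] failed", task.getId(), e);
        }
    });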
@@ -39,8 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;

@@ -637,8 +635,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
    }

    public UpdateRequest source(BytesReference source) throws Exception {
        ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
        Map<String, Object> scriptParams = null;
        Script script = null;
        try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
            XContentParser.Token token = parser.nextToken();

@@ -649,11 +645,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
            while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                if (token == XContentParser.Token.FIELD_NAME) {
                    currentFieldName = parser.currentName();
                } else if ("script".equals(currentFieldName) && token == XContentParser.Token.START_OBJECT) {
                    // here we don't have settings available, unable to throw strict deprecation exceptions
                } else if ("script".equals(currentFieldName)) {
                    script = Script.parse(parser, ParseFieldMatcher.EMPTY);
                } else if ("params".equals(currentFieldName)) {
                    scriptParams = parser.map();
                } else if ("scripted_upsert".equals(currentFieldName)) {
                    scriptedUpsert = parser.booleanValue();
                } else if ("upsert".equals(currentFieldName)) {

@@ -680,16 +673,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
                    if (fields != null) {
                        fields(fields.toArray(new String[fields.size()]));
                    }
                } else {
                    // here we don't have settings available, unable to throw deprecation exceptions
                    scriptParameterParser.token(currentFieldName, token, parser, ParseFieldMatcher.EMPTY);
                }
            }
            // Don't have a script using the new API so see if it is specified with the old API
            if (script == null) {
                ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue();
                if (scriptValue != null) {
                    script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), scriptParams);
                }
            }
            if (script != null) {
@@ -26,7 +26,6 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.inject.CreationException;
import org.elasticsearch.common.logging.ESLogger;
@@ -180,10 +179,10 @@ final class Bootstrap {
        };
    }

    private static Environment initialSettings(boolean foreground, String pidFile, Map<String, String> esSettings) {
    private static Environment initialSettings(boolean foreground, Path pidFile, Map<String, String> esSettings) {
        Terminal terminal = foreground ? Terminal.DEFAULT : null;
        Settings.Builder builder = Settings.builder();
        if (Strings.hasLength(pidFile)) {
        if (pidFile != null) {
            builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
        }
        return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);

@@ -215,7 +214,7 @@ final class Bootstrap {
     */
    static void init(
        final boolean foreground,
        final String pidFile,
        final Path pidFile,
        final Map<String, String> esSettings) throws Exception {
        // Set the system property before anything has a chance to trigger its use
        initLoggerPrefix();
@@ -247,6 +246,12 @@ final class Bootstrap {
        // fail if somebody replaced the lucene jars
        checkLucene();

        // install the default uncaught exception handler; must be done before security is
        // initialized as we do not want to grant the runtime permission
        // setDefaultUncaughtExceptionHandler
        Thread.setDefaultUncaughtExceptionHandler(
            new ElasticsearchUncaughtExceptionHandler(() -> Node.NODE_NAME_SETTING.get(settings)));

        INSTANCE.setup(true, settings, environment);

        INSTANCE.start();
@@ -104,6 +104,13 @@ final class BootstrapCheck {
        final List<String> errors = new ArrayList<>();
        final List<String> ignoredErrors = new ArrayList<>();

        if (enforceLimits) {
            logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
        }
        if (enforceLimits && ignoreSystemChecks) {
            logger.warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
        }

        for (final Check check : checks) {
            if (check.check()) {
                if ((!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) && !check.alwaysEnforce()) {
@@ -21,6 +21,9 @@ package org.elasticsearch.bootstrap;

import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.OptionSpecBuilder;
import joptsimple.util.PathConverter;
import joptsimple.util.PathProperties;
import org.elasticsearch.Build;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.SettingCommand;

@@ -29,6 +32,7 @@ import org.elasticsearch.cli.UserException;
import org.elasticsearch.monitor.jvm.JvmInfo;

import java.io.IOException;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Map;

@@ -37,22 +41,23 @@ import java.util.Map;
 */
class Elasticsearch extends SettingCommand {

    private final OptionSpec<Void> versionOption;
    private final OptionSpec<Void> daemonizeOption;
    private final OptionSpec<String> pidfileOption;
    private final OptionSpecBuilder versionOption;
    private final OptionSpecBuilder daemonizeOption;
    private final OptionSpec<Path> pidfileOption;

    // visible for testing
    Elasticsearch() {
        super("starts elasticsearch");
        // TODO: in jopt-simple 5.0, make this mutually exclusive with all other options
        versionOption = parser.acceptsAll(Arrays.asList("V", "version"),
            "Prints elasticsearch version information and exits");
        daemonizeOption = parser.acceptsAll(Arrays.asList("d", "daemonize"),
            "Starts Elasticsearch in the background");
        // TODO: in jopt-simple 5.0 this option type can be a Path
            "Starts Elasticsearch in the background")
            .availableUnless(versionOption);
        pidfileOption = parser.acceptsAll(Arrays.asList("p", "pidfile"),
            "Creates a pid file in the specified path on start")
            .withRequiredArg();
            .availableUnless(versionOption)
            .withRequiredArg()
            .withValuesConvertedBy(new PathConverter());
    }

    /**

@@ -86,12 +91,12 @@ class Elasticsearch extends SettingCommand {
        }

        final boolean daemonize = options.has(daemonizeOption);
        final String pidFile = pidfileOption.value(options);
        final Path pidFile = pidfileOption.value(options);

        init(daemonize, pidFile, settings);
    }

    void init(final boolean daemonize, final String pidFile, final Map<String, String> esSettings) {
    void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) {
        try {
            Bootstrap.init(!daemonize, pidFile, esSettings);
        } catch (final Throwable t) {
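With jopt-simple 5.0 the pid file option is parsed directly into a java.nio.file.Path and declared mutually exclusive with --version, instead of being post-processed by hand. A minimal standalone sketch of the same declarations (OptionsDemo and the sample path are illustrative; option names mirror the ones above):

    import joptsimple.OptionParser;
    import joptsimple.OptionSet;
    import joptsimple.OptionSpec;
    import joptsimple.OptionSpecBuilder;
    import joptsimple.util.PathConverter;

    import java.nio.file.Path;
    import java.util.Arrays;

    public final class OptionsDemo {
        public static void main(String[] args) {
            OptionParser parser = new OptionParser();
            OptionSpecBuilder version = parser.acceptsAll(Arrays.asList("V", "version"), "print version");
            OptionSpec<Path> pidfile = parser.acceptsAll(Arrays.asList("p", "pidfile"), "pid file path")
                .availableUnless(version)              // "-V -p x" now fails at parse time
                .withRequiredArg()
                .withValuesConvertedBy(new PathConverter());

            OptionSet options = parser.parse("-p", "/tmp/es.pid");
            Path pid = pidfile.value(options);         // already a typed Path
            System.out.println(pid);
        }
    }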
@@ -0,0 +1,94 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import org.apache.lucene.index.MergePolicy;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;

import java.io.IOError;
import java.util.Objects;
import java.util.function.Supplier;

class ElasticsearchUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {

    private final Supplier<String> loggingPrefixSupplier;

    ElasticsearchUncaughtExceptionHandler(final Supplier<String> loggingPrefixSupplier) {
        this.loggingPrefixSupplier = Objects.requireNonNull(loggingPrefixSupplier);
    }

    @Override
    public void uncaughtException(Thread t, Throwable e) {
        if (isFatalUncaught(e)) {
            try {
                onFatalUncaught(t.getName(), e);
            } finally {
                // we use specific error codes in case the above notification failed, at least we
                // will have some indication of the error bringing us down
                if (e instanceof InternalError) {
                    halt(128);
                } else if (e instanceof OutOfMemoryError) {
                    halt(127);
                } else if (e instanceof StackOverflowError) {
                    halt(126);
                } else if (e instanceof UnknownError) {
                    halt(125);
                } else if (e instanceof IOError) {
                    halt(124);
                } else {
                    halt(1);
                }
            }
        } else {
            onNonFatalUncaught(t.getName(), e);
        }
    }

    // visible for testing
    static boolean isFatalUncaught(Throwable e) {
        return isFatalCause(e) || (e instanceof MergePolicy.MergeException && isFatalCause(e.getCause()));
    }

    private static boolean isFatalCause(Throwable cause) {
        return cause instanceof Error;
    }

    // visible for testing
    void onFatalUncaught(final String threadName, final Throwable t) {
        final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.error("fatal error in thread [{}], exiting", t, threadName);
    }

    // visible for testing
    void onNonFatalUncaught(final String threadName, final Throwable t) {
        final ESLogger logger = Loggers.getLogger(ElasticsearchUncaughtExceptionHandler.class, loggingPrefixSupplier.get());
        logger.warn("uncaught exception in thread [{}]", t, threadName);
    }

    // visible for testing
    @SuppressForbidden(reason = "halt")
    void halt(int status) {
        // we halt to prevent shutdown hooks from running
        Runtime.getRuntime().halt(status);
    }

}
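The handler maps each fatal error type to a distinct exit status (128 InternalError, 127 OutOfMemoryError, 126 StackOverflowError, 125 UnknownError, 124 IOError, 1 otherwise), so the process exit code alone identifies what brought the node down. A sketch of the fatality classification, callable from test code in the same package (the MergeException constructor shape is assumed from the Lucene version in use):

    // true: raw virtual-machine errors are fatal
    System.out.println(ElasticsearchUncaughtExceptionHandler.isFatalUncaught(new OutOfMemoryError()));
    // false: ordinary exceptions are only logged, the node keeps running
    System.out.println(ElasticsearchUncaughtExceptionHandler.isFatalUncaught(new RuntimeException()));
    // true: a merge exception is fatal when its cause is an Error
    System.out.println(ElasticsearchUncaughtExceptionHandler.isFatalUncaught(
        new MergePolicy.MergeException(new OutOfMemoryError(), null)));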
@@ -120,7 +120,7 @@ final class Security {
        Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults));

        // enable security manager
        System.setSecurityManager(new SecureSM());
        System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap." }));

        // do some basic tests
        selfTest();

@@ -285,7 +285,7 @@ final class Security {
    }

    // loop through all profiles and add permissions for each one, if it's valid.
    // (otherwise NettyTransport is lenient and ignores it)
    // (otherwise Netty transports are lenient and ignore it)
    for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
        Settings profileSettings = entry.getValue();
        String name = entry.getKey();
@@ -41,7 +41,8 @@ public abstract class Command {

    private final OptionSpec<Void> helpOption = parser.acceptsAll(Arrays.asList("h", "help"), "show help").forHelp();
    private final OptionSpec<Void> silentOption = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
    private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output");
    private final OptionSpec<Void> verboseOption = parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output")
        .availableUnless(silentOption);

    public Command(String description) {
        this.description = description;

@@ -77,10 +78,6 @@ public abstract class Command {
        }

        if (options.has(silentOption)) {
            if (options.has(verboseOption)) {
                // mutually exclusive, we can remove this with jopt-simple 5.0, which natively supports it
                throw new UserException(ExitCodes.USAGE, "Cannot specify -s and -v together");
            }
            terminal.setVerbosity(Terminal.Verbosity.SILENT);
        } else if (options.has(verboseOption)) {
            terminal.setVerbosity(Terminal.Verbosity.VERBOSE);
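Declaring the exclusion on the spec removes the need for the hand-rolled "-s and -v together" check: jopt-simple now rejects the combination during parsing with its own OptionException. A small standalone illustration (ExclusiveFlagsDemo is illustrative):

    import joptsimple.OptionException;
    import joptsimple.OptionParser;
    import joptsimple.OptionSpecBuilder;

    import java.util.Arrays;

    public final class ExclusiveFlagsDemo {
        public static void main(String[] args) {
            OptionParser parser = new OptionParser();
            OptionSpecBuilder silent = parser.acceptsAll(Arrays.asList("s", "silent"), "show minimal output");
            parser.acceptsAll(Arrays.asList("v", "verbose"), "show verbose output")
                .availableUnless(silent);
            try {
                parser.parse("-s", "-v");
            } catch (OptionException e) {
                System.out.println("rejected at parse time: " + e.getMessage());
            }
        }
    }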
@@ -26,14 +26,17 @@ import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.GenericAction;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Map;

/**
 *
 * Client that executes actions on the local node.
 */
public class NodeClient extends AbstractClient {

@@ -52,10 +55,43 @@ public class NodeClient extends AbstractClient {
        // nothing really to do
    }

    @SuppressWarnings("unchecked")
    @Override
    public <Request extends ActionRequest<Request>, Response extends ActionResponse, RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>> void doExecute(
        Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
    public < Request extends ActionRequest<Request>,
             Response extends ActionResponse,
             RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>
           > void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
        // Discard the task because the Client interface doesn't use it.
        executeLocally(action, request, listener);
    }

    /**
     * Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking an {@link ActionListener}. Prefer this
     * method if you don't need access to the task when listening for the response. This is the method used to implement the {@link Client}
     * interface.
     */
    public < Request extends ActionRequest<Request>,
             Response extends ActionResponse
           > Task executeLocally(GenericAction<Request, Response> action, Request request, ActionListener<Response> listener) {
        return transportAction(action).execute(request, listener);
    }

    /**
     * Execute an {@link Action} locally, returning the {@link Task} used to track it, and linking a {@link TaskListener}. Prefer this
     * method if you need access to the task when listening for the response.
     */
    public < Request extends ActionRequest<Request>,
             Response extends ActionResponse
           > Task executeLocally(GenericAction<Request, Response> action, Request request, TaskListener<Response> listener) {
        return transportAction(action).execute(request, listener);
    }

    /**
     * Get the {@link TransportAction} for an {@link Action}, throwing exceptions if the action isn't available.
     */
    @SuppressWarnings("unchecked")
    private < Request extends ActionRequest<Request>,
              Response extends ActionResponse
            > TransportAction<Request, Response> transportAction(GenericAction<Request, Response> action) {
        if (actions == null) {
            throw new IllegalStateException("NodeClient has not been initialized");
        }

@@ -63,6 +99,6 @@ public class NodeClient extends AbstractClient {
        if (transportAction == null) {
            throw new IllegalStateException("failed to find action [" + action + "] to execute");
        }
        transportAction.execute(request, listener);
        return transportAction;
    }
}
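A hedged sketch of a plugin or REST handler using the new overloads to keep the Task handle (the logger and index name are illustrative; SearchAction, SearchRequest, SearchResponse and ActionListener are the real types):

    Task task = nodeClient.executeLocally(SearchAction.INSTANCE, new SearchRequest("my-index"),
        new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                logger.info("got [{}] hits", response.getHits().getTotalHits());
            }

            @Override
            public void onFailure(Exception e) {
                logger.warn("search failed", e);
            }
        });
    logger.info("search is running as task [{}]", task.getId());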
@@ -21,6 +21,9 @@ package org.elasticsearch.client.transport;

import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

@@ -53,6 +56,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
@@ -66,116 +70,120 @@ import org.elasticsearch.transport.TransportService;
 * The most important module used by the transport client is the {@link org.elasticsearch.common.network.NetworkModule}, which is
 * started in client mode (only connects, no bind).
 */
public class TransportClient extends AbstractClient {
public abstract class TransportClient extends AbstractClient {

    /**
     * Handy method to create a {@link org.elasticsearch.client.transport.TransportClient.Builder}.
     */
    public static Builder builder() {
        return new Builder();
    private static PluginsService newPluginService(final Settings settings, Collection<Class<? extends Plugin>> plugins) {
        final Settings.Builder settingsBuilder = Settings.builder()
            .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
            .put(InternalSettingsPreparer.prepareSettings(settings))
            .put(NetworkService.NETWORK_SERVER.getKey(), false)
            .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
        return new PluginsService(settingsBuilder.build(), null, null, plugins);
    }

    /**
     * A builder used to create an instance of the transport client.
     */
    public static class Builder {
    protected static Collection<Class<? extends Plugin>> addPlugins(Collection<Class<? extends Plugin>> collection,
                                                                    Class<? extends Plugin>... plugins) {
        return addPlugins(collection, Arrays.asList(plugins));
    }

        private Settings providedSettings = Settings.EMPTY;
        private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();

        /**
         * The settings to configure the transport client with.
         */
        public Builder settings(Settings.Builder settings) {
            return settings(settings.build());
        }

        /**
         * The settings to configure the transport client with.
         */
        public Builder settings(Settings settings) {
            this.providedSettings = settings;
            return this;
        }

        /**
         * Add the given plugin to the client when it is created.
         */
        public Builder addPlugin(Class<? extends Plugin> pluginClass) {
            pluginClasses.add(pluginClass);
            return this;
        }

        private PluginsService newPluginService(final Settings settings) {
            final Settings.Builder settingsBuilder = Settings.builder()
                .put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
                .put(InternalSettingsPreparer.prepareSettings(settings))
                .put(NetworkService.NETWORK_SERVER.getKey(), false)
                .put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
            return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
        }

        /**
         * Builds a new instance of the transport client.
         */
        public TransportClient build() {
            final PluginsService pluginsService = newPluginService(providedSettings);
            final Settings settings = pluginsService.updatedSettings();
            final List<Closeable> resourcesToClose = new ArrayList<>();
            final ThreadPool threadPool = new ThreadPool(settings);
            resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
            final NetworkService networkService = new NetworkService(settings);
            NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
            try {
                final List<Setting<?>> additionalSettings = new ArrayList<>();
                final List<String> additionalSettingsFilter = new ArrayList<>();
                additionalSettings.addAll(pluginsService.getPluginSettings());
                additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
                for (final ExecutorBuilder<?> builder : threadPool.builders()) {
                    additionalSettings.addAll(builder.getRegisteredSettings());
                }
                SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);

                ModulesBuilder modules = new ModulesBuilder();
                // plugin modules must be added here, before others or we can get crazy injection errors...
                for (Module pluginModule : pluginsService.nodeModules()) {
                    modules.add(pluginModule);
                }
                modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
                modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
                modules.add(new SearchModule(settings, namedWriteableRegistry, true));
                ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
                    pluginsService.filterPlugins(ActionPlugin.class));
                modules.add(actionModule);

                pluginsService.processModules(modules);
                CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
                    settingsModule.getClusterSettings());
                resourcesToClose.add(circuitBreakerService);
                BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
                resourcesToClose.add(bigArrays);
                modules.add(settingsModule);
                modules.add((b -> {
                    b.bind(BigArrays.class).toInstance(bigArrays);
                    b.bind(PluginsService.class).toInstance(pluginsService);
                    b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
                }));

                Injector injector = modules.createInjector();
                final TransportService transportService = injector.getInstance(TransportService.class);
                final TransportClientNodesService nodesService =
                    new TransportClientNodesService(settings, transportService, threadPool);
                final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
                    actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));

                transportService.start();
                transportService.acceptIncomingRequests();
                TransportClient transportClient = new TransportClient(injector, nodesService, proxy);
                resourcesToClose.clear();
                return transportClient;
            } finally {
                IOUtils.closeWhileHandlingException(resourcesToClose);
    protected static Collection<Class<? extends Plugin>> addPlugins(Collection<Class<? extends Plugin>> collection,
                                                                    Collection<Class<? extends Plugin>> plugins) {
        ArrayList<Class<? extends Plugin>> list = new ArrayList<>(collection);
        for (Class<? extends Plugin> p : plugins) {
            if (list.contains(p)) {
                throw new IllegalArgumentException("plugin already exists: " + p);
            }
            list.add(p);
        }
        return list;
    }

    private static ClientTemplate buildTemplate(Settings providedSettings, Settings defaultSettings,
                                                Collection<Class<? extends Plugin>> plugins) {
        final PluginsService pluginsService = newPluginService(providedSettings, plugins);
        final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
        final List<Closeable> resourcesToClose = new ArrayList<>();
        final ThreadPool threadPool = new ThreadPool(settings);
        resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
        final NetworkService networkService = new NetworkService(settings);
        NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
        try {
            final List<Setting<?>> additionalSettings = new ArrayList<>();
            final List<String> additionalSettingsFilter = new ArrayList<>();
            additionalSettings.addAll(pluginsService.getPluginSettings());
            additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
            for (final ExecutorBuilder<?> builder : threadPool.builders()) {
                additionalSettings.addAll(builder.getRegisteredSettings());
            }
            SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);

            ModulesBuilder modules = new ModulesBuilder();
            // plugin modules must be added here, before others or we can get crazy injection errors...
            for (Module pluginModule : pluginsService.createGuiceModules()) {
                modules.add(pluginModule);
            }
            modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
            modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
            modules.add(new SearchModule(settings, namedWriteableRegistry, true, pluginsService.filterPlugins(SearchPlugin.class)));
            ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
                pluginsService.filterPlugins(ActionPlugin.class));
            modules.add(actionModule);

            pluginsService.processModules(modules);
            CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
                settingsModule.getClusterSettings());
            resourcesToClose.add(circuitBreakerService);
            BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
            resourcesToClose.add(bigArrays);
            modules.add(settingsModule);
            modules.add((b -> {
                b.bind(BigArrays.class).toInstance(bigArrays);
                b.bind(PluginsService.class).toInstance(pluginsService);
                b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
            }));

            Injector injector = modules.createInjector();
            final TransportService transportService = injector.getInstance(TransportService.class);
            final TransportClientNodesService nodesService =
                new TransportClientNodesService(settings, transportService, threadPool);
            final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
                actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));

            List<LifecycleComponent> pluginLifecycleComponents = new ArrayList<>();
            pluginLifecycleComponents.addAll(pluginsService.getGuiceServiceClasses().stream()
                .map(injector::getInstance).collect(Collectors.toList()));
            resourcesToClose.addAll(pluginLifecycleComponents);

            transportService.start();
            transportService.acceptIncomingRequests();

            ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy);
            resourcesToClose.clear();
            return transportClient;
        } finally {
            IOUtils.closeWhileHandlingException(resourcesToClose);
        }
    }

    private static final class ClientTemplate {
        final Injector injector;
        private final List<LifecycleComponent> pluginLifecycleComponents;
        private final TransportClientNodesService nodesService;
        private final TransportProxyClient proxy;

        private ClientTemplate(Injector injector, List<LifecycleComponent> pluginLifecycleComponents,
                               TransportClientNodesService nodesService, TransportProxyClient proxy) {
            this.injector = injector;
            this.pluginLifecycleComponents = pluginLifecycleComponents;
            this.nodesService = nodesService;
            this.proxy = proxy;
        }

        Settings getSettings() {
            return injector.getInstance(Settings.class);
        }

        ThreadPool getThreadPool() {
            return injector.getInstance(ThreadPool.class);
        }
    }

@@ -183,14 +191,33 @@ public class TransportClient extends AbstractClient {

    final Injector injector;

    private final List<LifecycleComponent> pluginLifecycleComponents;
    private final TransportClientNodesService nodesService;
    private final TransportProxyClient proxy;

    private TransportClient(Injector injector, TransportClientNodesService nodesService, TransportProxyClient proxy) {
        super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class));
        this.injector = injector;
        this.nodesService = nodesService;
        this.proxy = proxy;
    /**
     * Creates a new TransportClient with the given settings and plugins.
     */
    public TransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
        this(buildTemplate(settings, Settings.EMPTY, plugins));
    }

    /**
     * Creates a new TransportClient with the given settings, defaults and plugins.
     * @param settings the client settings
     * @param defaultSettings default settings that are merged after the plugins have added their additional settings.
     * @param plugins the client plugins
     */
    protected TransportClient(Settings settings, Settings defaultSettings, Collection<Class<? extends Plugin>> plugins) {
        this(buildTemplate(settings, defaultSettings, plugins));
    }

    private TransportClient(ClientTemplate template) {
        super(template.getSettings(), template.getThreadPool());
        this.injector = template.injector;
        this.pluginLifecycleComponents = Collections.unmodifiableList(template.pluginLifecycleComponents);
        this.nodesService = template.nodesService;
        this.proxy = template.proxy;
    }

    /**
@@ -269,8 +296,8 @@ public class TransportClient extends AbstractClient {
        closeables.add(nodesService);
        closeables.add(injector.getInstance(TransportService.class));

        for (Class<? extends LifecycleComponent> plugin : injector.getInstance(PluginsService.class).nodeServices()) {
            closeables.add(injector.getInstance(plugin));
        for (LifecycleComponent plugin : pluginLifecycleComponents) {
            closeables.add(plugin);
        }
        closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS));
        closeables.add(injector.getInstance(BigArrays.class));
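Because TransportClient is now abstract and takes its plugins through the constructor, callers subclass it instead of using the removed Builder. A hedged sketch of a concrete client (MyTransportClient and the cluster name are illustrative; the constructor shapes are the ones introduced above):

    public class MyTransportClient extends TransportClient {
        public MyTransportClient(Settings settings) {
            // addPlugins(...) from above can merge in plugin classes and
            // rejects duplicates; none are needed here.
            super(settings, Collections.<Class<? extends Plugin>>emptyList());
        }
    }

    // Usage sketch:
    // TransportClient client = new MyTransportClient(
    //     Settings.builder().put("cluster.name", "my-cluster").build());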
@@ -19,21 +19,31 @@

package org.elasticsearch.cluster;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
 * and a map of shard ids to shard sizes, see
 * <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
 * for the key used in the shardSizes map
 */
public class ClusterInfo {
public class ClusterInfo implements ToXContent, Writeable {
    private final ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsage;
    private final ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsage;
    final ImmutableOpenMap<String, Long> shardSizes;
    public static final ClusterInfo EMPTY = new ClusterInfo();
    private final ImmutableOpenMap<ShardRouting, String> routingToDataPath;
    final ImmutableOpenMap<ShardRouting, String> routingToDataPath;

    protected ClusterInfo() {
        this(ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of(), ImmutableOpenMap.of());
@@ -57,6 +67,86 @@ public class ClusterInfo {
         this.routingToDataPath = routingToDataPath;
     }

+    public ClusterInfo(StreamInput in) throws IOException {
+        Map<String, DiskUsage> leastMap = in.readMap(StreamInput::readString, DiskUsage::new);
+        Map<String, DiskUsage> mostMap = in.readMap(StreamInput::readString, DiskUsage::new);
+        Map<String, Long> sizeMap = in.readMap(StreamInput::readString, StreamInput::readLong);
+        Map<ShardRouting, String> routingMap = in.readMap(ShardRouting::new, StreamInput::readString);
+
+        ImmutableOpenMap.Builder<String, DiskUsage> leastBuilder = ImmutableOpenMap.builder();
+        this.leastAvailableSpaceUsage = leastBuilder.putAll(leastMap).build();
+        ImmutableOpenMap.Builder<String, DiskUsage> mostBuilder = ImmutableOpenMap.builder();
+        this.mostAvailableSpaceUsage = mostBuilder.putAll(mostMap).build();
+        ImmutableOpenMap.Builder<String, Long> sizeBuilder = ImmutableOpenMap.builder();
+        this.shardSizes = sizeBuilder.putAll(sizeMap).build();
+        ImmutableOpenMap.Builder<ShardRouting, String> routingBuilder = ImmutableOpenMap.builder();
+        this.routingToDataPath = routingBuilder.putAll(routingMap).build();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeVInt(this.leastAvailableSpaceUsage.size());
+        for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
+            out.writeString(c.key);
+            c.value.writeTo(out);
+        }
+        out.writeVInt(this.mostAvailableSpaceUsage.size());
+        for (ObjectObjectCursor<String, DiskUsage> c : this.mostAvailableSpaceUsage) {
+            out.writeString(c.key);
+            c.value.writeTo(out);
+        }
+        out.writeVInt(this.shardSizes.size());
+        for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
+            out.writeString(c.key);
+            if (c.value == null) {
+                out.writeLong(-1);
+            } else {
+                out.writeLong(c.value);
+            }
+        }
+        out.writeVInt(this.routingToDataPath.size());
+        for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
+            c.key.writeTo(out);
+            out.writeString(c.value);
+        }
+    }
+
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject("nodes"); {
+            for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
+                builder.startObject(c.key); { // node
+                    builder.field("node_name", c.value.getNodeName());
+                    builder.startObject("least_available"); {
+                        c.value.toShortXContent(builder, params);
+                    }
+                    builder.endObject(); // end "least_available"
+                    builder.startObject("most_available"); {
+                        DiskUsage most = this.mostAvailableSpaceUsage.get(c.key);
+                        if (most != null) {
+                            most.toShortXContent(builder, params);
+                        }
+                    }
+                    builder.endObject(); // end "most_available"
+                }
+                builder.endObject(); // end $nodename
+            }
+        }
+        builder.endObject(); // end "nodes"
+        builder.startObject("shard_sizes"); {
+            for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
+                builder.byteSizeField(c.key + "_bytes", c.key, c.value);
+            }
+        }
+        builder.endObject(); // end "shard_sizes"
+        builder.startObject("shard_paths"); {
+            for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
+                builder.field(c.key.toString(), c.value);
+            }
+        }
+        builder.endObject(); // end "shard_paths"
+        return builder;
+    }
+
     /**
      * Returns a node id to disk usage mapping for the path that has the least available space on the node.
      */
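The constructor/writeTo pair above is a standard Writeable round trip: writeTo emits four length-prefixed maps and the StreamInput constructor reads them back in the same order. A minimal hedged sketch of exercising it, assuming the 5.x-era BytesStreamOutput helper (the variable names are illustrative, not from this diff):

    // Hypothetical round-trip check for the serialization code above.
    BytesStreamOutput out = new BytesStreamOutput();
    clusterInfo.writeTo(out);                                      // least/most disk usage, shard sizes, shard paths
    ClusterInfo copy = new ClusterInfo(out.bytes().streamInput()); // must consume the stream in the same order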
@@ -31,7 +31,6 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
 import org.elasticsearch.cluster.metadata.MetaDataMappingService;
 import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
-import org.elasticsearch.cluster.node.DiscoveryNodeService;
 import org.elasticsearch.cluster.routing.DelayedAllocationService;
 import org.elasticsearch.cluster.routing.RoutingService;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;

@@ -146,7 +145,6 @@ public class ClusterModule extends AbstractModule {
         bind(ClusterInfoService.class).to(clusterInfoServiceImpl).asEagerSingleton();
         bind(GatewayAllocator.class).asEagerSingleton();
         bind(AllocationService.class).asEagerSingleton();
-        bind(DiscoveryNodeService.class).asEagerSingleton();
         bind(ClusterService.class).toInstance(clusterService);
         bind(NodeConnectionsService.class).asEagerSingleton();
         bind(MetaDataCreateIndexService.class).asEagerSingleton();
@@ -20,12 +20,20 @@
 package org.elasticsearch.cluster;

 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;

 /**
  * Encapsulation class used to represent the amount of disk used on a node.
  */
-public class DiskUsage {
+public class DiskUsage implements ToXContent, Writeable {
     final String nodeId;
     final String nodeName;
     final String path;

@@ -44,6 +52,44 @@ public class DiskUsage {
         this.path = path;
     }

+    public DiskUsage(StreamInput in) throws IOException {
+        this.nodeId = in.readString();
+        this.nodeName = in.readString();
+        this.path = in.readString();
+        this.totalBytes = in.readVLong();
+        this.freeBytes = in.readVLong();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(this.nodeId);
+        out.writeString(this.nodeName);
+        out.writeString(this.path);
+        out.writeVLong(this.totalBytes);
+        out.writeVLong(this.freeBytes);
+    }
+
+    private static double truncatePercent(double pct) {
+        return Math.round(pct * 10.0) / 10.0;
+    }
+
+    public XContentBuilder toShortXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("path", this.path);
+        builder.byteSizeField("total_bytes", "total", this.totalBytes);
+        builder.byteSizeField("used_bytes", "used", this.getUsedBytes());
+        builder.byteSizeField("free_bytes", "free", this.freeBytes);
+        builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage()));
+        builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage()));
+        return builder;
+    }
+
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("node_id", this.nodeId);
+        builder.field("node_name", this.nodeName);
+        builder = toShortXContent(builder, params);
+        return builder;
+    }
+
     public String getNodeId() {
         return nodeId;
     }

@@ -81,6 +127,24 @@ public class DiskUsage {
         return getTotalBytes() - getFreeBytes();
     }

+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        DiskUsage other = (DiskUsage) o;
+        return Objects.equals(nodeId, other.nodeId) &&
+                Objects.equals(nodeName, other.nodeName) &&
+                Objects.equals(path, other.path) && // compare path too, keeping equals consistent with hashCode below
+                Objects.equals(totalBytes, other.totalBytes) &&
+                Objects.equals(freeBytes, other.freeBytes);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(nodeId, nodeName, path, totalBytes, freeBytes);
+    }
+
     @Override
     public String toString() {
         return "[" + nodeId + "][" + nodeName + "][" + path + "] free: " + new ByteSizeValue(getFreeBytes()) +
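For reference, toShortXContent(...) above produces one node's disk block. A hedged illustration of its shape with made-up values (byteSizeField always emits the raw *_bytes field and adds the human-readable twin when human=true is requested):

    {
      "path": "/data/nodes/0",
      "total_bytes": 100000000,
      "used_bytes": 60000000,
      "free_bytes": 40000000,
      "free_disk_percent": 40.0,
      "used_disk_percent": 60.0
    }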
@@ -19,18 +19,26 @@
 package org.elasticsearch.cluster;

+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
 /**
  * Thrown when a node join request or a master ping reaches a node which is not
  * currently acting as a master or when a cluster state update task is to be executed
  * on a node that is no longer master.
  */
-public class NotMasterException extends IllegalStateException {
+public class NotMasterException extends ElasticsearchException {

     public NotMasterException(String msg) {
         super(msg);
     }

+    public NotMasterException(StreamInput in) throws IOException {
+        super(in);
+    }
+
     @Override
     public Throwable fillInStackTrace() {
         return null;
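Returning null from fillInStackTrace() above makes this a stackless exception, which keeps it cheap on hot cluster-state paths where it is thrown routinely. A hedged illustration of the observable effect:

    // Sketch: no stack frames are captured for this exception.
    Throwable t = new NotMasterException("no longer master");
    assert t.getStackTrace().length == 0;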
@@ -17,23 +17,24 @@
  * under the License.
  */

-package org.elasticsearch.repositories;
-
-import org.elasticsearch.common.inject.AbstractModule;
+package org.elasticsearch.cluster.ack;

 /**
- * Binds specific instance of RepositoryName for injection to repository module
+ * A cluster state update response with specific fields for index creation.
  */
-public class RepositoryNameModule extends AbstractModule {
+public class CreateIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse {

-    private final RepositoryName repositoryName;
+    private final boolean shardsAcked;

-    public RepositoryNameModule(RepositoryName repositoryName) {
-        this.repositoryName = repositoryName;
+    public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean shardsAcked) {
+        super(acknowledged);
+        this.shardsAcked = shardsAcked;
     }

-    @Override
-    protected void configure() {
-        bind(RepositoryName.class).toInstance(repositoryName);
+    /**
+     * Returns whether the requisite number of shard copies started before the completion of the operation.
+     */
+    public boolean isShardsAcked() {
+        return shardsAcked;
     }
 }
@@ -54,7 +54,7 @@ public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, W

         for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
             int shardId = shardRoutingTable.shardId().id();
-            shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable));
+            shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable, indexMetaData));
         }

         // update the index status
@@ -19,8 +19,12 @@

 package org.elasticsearch.cluster.health;

+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
+import org.elasticsearch.cluster.routing.UnassignedInfo.Reason;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;

@@ -37,13 +41,12 @@ public final class ClusterShardHealth implements Writeable {
     private final int unassignedShards;
     private final boolean primaryActive;

-    public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) {
+    public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
         this.shardId = shardId;
         int computeActiveShards = 0;
         int computeRelocatingShards = 0;
         int computeInitializingShards = 0;
         int computeUnassignedShards = 0;
-        boolean computePrimaryActive = false;
         for (ShardRouting shardRouting : shardRoutingTable) {
             if (shardRouting.active()) {
                 computeActiveShards++;

@@ -51,9 +54,6 @@ public final class ClusterShardHealth implements Writeable {
                     // the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it
                     computeRelocatingShards++;
                 }
-                if (shardRouting.primary()) {
-                    computePrimaryActive = true;
-                }
             } else if (shardRouting.initializing()) {
                 computeInitializingShards++;
             } else if (shardRouting.unassigned()) {

@@ -61,21 +61,22 @@ public final class ClusterShardHealth implements Writeable {
             }
         }
         ClusterHealthStatus computeStatus;
-        if (computePrimaryActive) {
+        final ShardRouting primaryRouting = shardRoutingTable.primaryShard();
+        if (primaryRouting.active()) {
             if (computeActiveShards == shardRoutingTable.size()) {
                 computeStatus = ClusterHealthStatus.GREEN;
             } else {
                 computeStatus = ClusterHealthStatus.YELLOW;
             }
         } else {
-            computeStatus = ClusterHealthStatus.RED;
+            computeStatus = getInactivePrimaryHealth(primaryRouting, indexMetaData);
         }
         this.status = computeStatus;
         this.activeShards = computeActiveShards;
         this.relocatingShards = computeRelocatingShards;
         this.initializingShards = computeInitializingShards;
         this.unassignedShards = computeUnassignedShards;
-        this.primaryActive = computePrimaryActive;
+        this.primaryActive = primaryRouting.active();
     }

     public ClusterShardHealth(final StreamInput in) throws IOException {

@@ -126,4 +127,36 @@ public final class ClusterShardHealth implements Writeable {
         out.writeVInt(unassignedShards);
         out.writeBoolean(primaryActive);
     }

+    /**
+     * Checks if an inactive primary shard should cause the cluster health to go RED.
+     *
+     * Normally, an inactive primary shard in an index should cause the cluster health to be RED. However,
+     * there are exceptions where a health status of RED is inappropriate, namely in these scenarios:
+     *   1. Index creation: when an index is first created, the primary shards are in the initializing state, so
+     *      there is a small window where the cluster health is RED due to the primaries not being activated yet.
+     *      This gives a false sense that the cluster is in an unhealthy state, when in reality it's
+     *      simply a case of needing to wait for the primaries to initialize.
+     *   2. Cluster recovery: when the shard never had any allocation ids assigned to it, the index was created
+     *      but a cluster restart happened before the primary was ever allocated for this shard.
+     *
+     * Here, we check for these scenarios and set the cluster health to YELLOW if any are applicable.
+     *
+     * NB: this method should *not* be called on active shards nor on non-primary shards.
+     */
+    public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting, final IndexMetaData indexMetaData) {
+        assert shardRouting.primary() : "cannot invoke on a replica shard: " + shardRouting;
+        assert shardRouting.active() == false : "cannot invoke on an active shard: " + shardRouting;
+        assert shardRouting.unassignedInfo() != null : "cannot invoke on a shard with no UnassignedInfo: " + shardRouting;
+        final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+        if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO
+                && shardRouting.allocatedPostIndexCreate(indexMetaData) == false
+                && (unassignedInfo.getReason() == Reason.INDEX_CREATED || unassignedInfo.getReason() == Reason.CLUSTER_RECOVERED)) {
+            return ClusterHealthStatus.YELLOW;
+        } else {
+            return ClusterHealthStatus.RED;
+        }
+    }
+
 }
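Read together, the YELLOW branch above boils down to a single predicate. A hedged restatement using only names from this diff:

    // YELLOW iff the inactive primary was not refused by the deciders, has never been
    // allocated since the index was created, and is unassigned only because of index
    // creation or cluster recovery; otherwise RED.
    boolean yellow = unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO
            && shardRouting.allocatedPostIndexCreate(indexMetaData) == false
            && (unassignedInfo.getReason() == Reason.INDEX_CREATED
                || unassignedInfo.getReason() == Reason.CLUSTER_RECOVERED);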
@@ -27,9 +27,11 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
+import org.elasticsearch.action.support.ActiveShardsObserver;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
+import org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.block.ClusterBlocks;

@@ -68,6 +70,7 @@ import org.elasticsearch.indices.IndexAlreadyExistsException;
 import org.elasticsearch.indices.IndexCreationException;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidIndexNameException;
+import org.elasticsearch.threadpool.ThreadPool;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;

@@ -108,13 +111,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
     private final Environment env;
     private final NodeServicesProvider nodeServicesProvider;
     private final IndexScopedSettings indexScopedSettings;
+    private final ActiveShardsObserver activeShardsObserver;

     @Inject
     public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
                                       IndicesService indicesService, AllocationService allocationService,
                                       AliasValidator aliasValidator,
-                                      Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
+                                      Set<IndexTemplateFilter> indexTemplateFilters, Environment env,
+                                      NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings,
+                                      ThreadPool threadPool) {
         super(settings);
         this.clusterService = clusterService;
         this.indicesService = indicesService;

@@ -135,6 +140,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             }
             this.indexTemplateFilter = new IndexTemplateFilter.Compound(templateFilters);
         }
+        this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
     }

     public void validateIndexName(String index, ClusterState state) {

@@ -176,7 +182,38 @@ public class MetaDataCreateIndexService extends AbstractComponent {
         }
     }

-    public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
+    /**
+     * Creates an index in the cluster state and waits for the specified number of shard copies to
+     * become active (as specified in {@link CreateIndexClusterStateUpdateRequest#waitForActiveShards()})
+     * before sending the response on the listener. If the index creation was successfully applied on
+     * the cluster state, then {@link CreateIndexClusterStateUpdateResponse#isAcknowledged()} will return
+     * true, otherwise it will return false and no waiting will occur for started shards
+     * ({@link CreateIndexClusterStateUpdateResponse#isShardsAcked()} will also be false). If the index
+     * creation in the cluster state was successful and the requisite shard copies were started before
+     * the timeout, then {@link CreateIndexClusterStateUpdateResponse#isShardsAcked()} will
+     * return true, otherwise if the operation timed out, then it will return false.
+     *
+     * @param request the index creation cluster state update request
+     * @param listener the listener on which to send the index creation cluster state update response
+     */
+    public void createIndex(final CreateIndexClusterStateUpdateRequest request,
+                            final ActionListener<CreateIndexClusterStateUpdateResponse> listener) {
+        onlyCreateIndex(request, ActionListener.wrap(response -> {
+            if (response.isAcknowledged()) {
+                activeShardsObserver.waitForActiveShards(request.index(), request.waitForActiveShards(), request.ackTimeout(),
+                    shardsAcked -> {
+                        if (shardsAcked == false) { // guard restored: only log when the wait actually timed out
+                            logger.debug("[{}] index created, but the operation timed out while waiting for " +
+                                             "enough shards to be started.", request.index());
+                        }
+                        listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcked));
+                    }, listener::onFailure);
+            } else {
+                listener.onResponse(new CreateIndexClusterStateUpdateResponse(false, false));
+            }
+        }, listener::onFailure));
+    }
+
+    private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request,
+                                 final ActionListener<ClusterStateUpdateResponse> listener) {
         Settings.Builder updatedSettingsBuilder = Settings.builder();
         updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
         indexScopedSettings.validate(updatedSettingsBuilder);

@@ -308,6 +345,11 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     .setRoutingNumShards(routingNumShards);
                 // Set up everything, now locally create the index to see that things are ok, and apply
                 final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
+                if (request.waitForActiveShards().resolve(tmpImd) > tmpImd.getNumberOfReplicas() + 1) {
+                    throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() +
+                                                       "]: cannot be greater than number of shard copies [" +
+                                                       (tmpImd.getNumberOfReplicas() + 1) + "]");
+                }
                 // create the index here (on the master) to validate it can be created, as well as adding the mapping
                 final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
                 createdIndex = indexService.index();

@@ -408,6 +450,16 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                     }
                 }
             }
+
+            @Override
+            public void onFailure(String source, Exception e) {
+                if (e instanceof IndexAlreadyExistsException) {
+                    logger.trace("[{}] failed to create", e, request.index());
+                } else {
+                    logger.debug("[{}] failed to create", e, request.index());
+                }
+                super.onFailure(source, e);
+            }
         });
     }
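Callers of the new createIndex(...) see two levels of acking: cluster-state acknowledgement and shard-start acknowledgement. A hedged usage sketch (the handler bodies are placeholders, not from this diff):

    createIndexService.createIndex(request, ActionListener.wrap(response -> {
        if (response.isAcknowledged() == false) {
            // master did not apply the index-creation cluster state update
        } else if (response.isShardsAcked() == false) {
            // index exists, but wait_for_active_shards timed out before enough copies started
        } else {
            // index created and the requested shard copies are active
        }
    }, e -> { /* creation failed outright */ }));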
@@ -35,9 +35,11 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.function.Predicate;
 import java.util.function.Supplier;

 import static org.elasticsearch.common.transport.TransportAddressSerializers.addressToStream;

@@ -46,23 +48,6 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
  */
 public class DiscoveryNode implements Writeable, ToXContent {

-    public static boolean isLocalNode(Settings settings) {
-        if (Node.NODE_LOCAL_SETTING.exists(settings)) {
-            return Node.NODE_LOCAL_SETTING.get(settings);
-        }
-        if (Node.NODE_MODE_SETTING.exists(settings)) {
-            String nodeMode = Node.NODE_MODE_SETTING.get(settings);
-            if ("local".equals(nodeMode)) {
-                return true;
-            } else if ("network".equals(nodeMode)) {
-                return false;
-            } else {
-                throw new IllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network].");
-            }
-        }
-        return false;
-    }
-
     public static boolean nodeRequiresLocalStorage(Settings settings) {
         boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings);
         if (localStorageEnable == false &&

@@ -97,6 +82,24 @@ public class DiscoveryNode implements Writeable, ToXContent {
     private final Version version;
     private final Set<Role> roles;

+    /**
+     * Creates a new {@link DiscoveryNode}
+     * <p>
+     * <b>Note:</b> if the version of the node is unknown, {@link Version#minimumCompatibilityVersion()} should be used for the current
+     * version. It corresponds to the minimum version this elasticsearch version can communicate with. If a higher version is used
+     * the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
+     * and updated.
+     * </p>
+     *
+     * @param id the node's unique (persistent) node id. This constructor will auto generate a random ephemeral id.
+     * @param address the node's transport address
+     * @param version the version of the node
+     */
+    public DiscoveryNode(final String id, TransportAddress address, Version version) {
+        this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
+    }
+
     /**
      * Creates a new {@link DiscoveryNode}
      * <p>

@@ -187,6 +190,24 @@ public class DiscoveryNode implements Writeable, ToXContent {
         this.roles = Collections.unmodifiableSet(rolesSet);
     }

+    /** Creates a DiscoveryNode representing the local node. */
+    public static DiscoveryNode createLocal(Settings settings, TransportAddress publishAddress, String nodeId) {
+        Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(settings).getAsMap());
+        Set<DiscoveryNode.Role> roles = new HashSet<>();
+        if (Node.NODE_INGEST_SETTING.get(settings)) {
+            roles.add(DiscoveryNode.Role.INGEST);
+        }
+        if (Node.NODE_MASTER_SETTING.get(settings)) {
+            roles.add(DiscoveryNode.Role.MASTER);
+        }
+        if (Node.NODE_DATA_SETTING.get(settings)) {
+            roles.add(DiscoveryNode.Role.DATA);
+        }
+
+        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress,
+                                 attributes, roles, Version.CURRENT);
+    }
+
     /**
      * Creates a new {@link DiscoveryNode} by reading from the stream provided as argument
      * @param in the stream
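createLocal(...) above derives the local node's roles purely from settings. A hedged sketch of the mapping, assuming the standard 5.x setting keys:

    Settings settings = Settings.builder()
            .put("node.master", true)   // Node.NODE_MASTER_SETTING
            .put("node.data", false)    // Node.NODE_DATA_SETTING
            .put("node.ingest", false)  // Node.NODE_INGEST_SETTING
            .build();
    // createLocal(settings, publishAddress, nodeId) -> roles == {Role.MASTER}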
@@ -1,88 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.node;
-
-import org.elasticsearch.Version;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.node.Node;
-
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.function.Supplier;
-
-/**
- */
-public class DiscoveryNodeService extends AbstractComponent {
-
-    private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
-
-    @Inject
-    public DiscoveryNodeService(Settings settings) {
-        super(settings);
-    }
-
-    public DiscoveryNodeService addCustomAttributeProvider(CustomAttributesProvider customAttributesProvider) {
-        customAttributesProviders.add(customAttributesProvider);
-        return this;
-    }
-
-    public DiscoveryNode buildLocalNode(TransportAddress publishAddress, Supplier<String> nodeIdSupplier) {
-        Map<String, String> attributes = new HashMap<>(Node.NODE_ATTRIBUTES.get(this.settings).getAsMap());
-        Set<DiscoveryNode.Role> roles = new HashSet<>();
-        if (Node.NODE_INGEST_SETTING.get(settings)) {
-            roles.add(DiscoveryNode.Role.INGEST);
-        }
-        if (Node.NODE_MASTER_SETTING.get(settings)) {
-            roles.add(DiscoveryNode.Role.MASTER);
-        }
-        if (Node.NODE_DATA_SETTING.get(settings)) {
-            roles.add(DiscoveryNode.Role.DATA);
-        }
-
-        for (CustomAttributesProvider provider : customAttributesProviders) {
-            try {
-                Map<String, String> customAttributes = provider.buildAttributes();
-                if (customAttributes != null) {
-                    for (Map.Entry<String, String> entry : customAttributes.entrySet()) {
-                        if (!attributes.containsKey(entry.getKey())) {
-                            attributes.put(entry.getKey(), entry.getValue());
-                        }
-                    }
-                }
-            } catch (Exception e) {
-                logger.warn("failed to build custom attributes from provider [{}]", e, provider);
-            }
-        }
-        return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeIdSupplier.get(), publishAddress, attributes, roles,
-            Version.CURRENT);
-    }
-
-    public interface CustomAttributesProvider {
-
-        Map<String, String> buildAttributes();
-    }
-}
@@ -25,6 +25,7 @@ import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.Tuple;

@@ -641,14 +642,27 @@ public class RoutingNodes implements Iterable<RoutingNode> {
          * Should be used with caution, typically,
          * the correct usage is to removeAndIgnore from the iterator.
          * @see #ignored()
-         * @see UnassignedIterator#removeAndIgnore()
+         * @see UnassignedIterator#removeAndIgnore(AllocationStatus)
         * @see #isIgnoredEmpty()
+         * @return true iff the decision caused a change to the unassigned info
          */
-        public void ignoreShard(ShardRouting shard) {
+        public boolean ignoreShard(ShardRouting shard, AllocationStatus allocationStatus) {
+            boolean changed = false;
             if (shard.primary()) {
                 ignoredPrimaries++;
+                UnassignedInfo currInfo = shard.unassignedInfo();
+                assert currInfo != null;
+                if (allocationStatus.equals(currInfo.getLastAllocationStatus()) == false) {
+                    UnassignedInfo newInfo = new UnassignedInfo(currInfo.getReason(), currInfo.getMessage(), currInfo.getFailure(),
+                                                                currInfo.getNumFailedAllocations(), currInfo.getUnassignedTimeInNanos(),
+                                                                currInfo.getUnassignedTimeInMillis(), currInfo.isDelayed(),
+                                                                allocationStatus);
+                    shard = shard.updateUnassignedInfo(newInfo);
+                    changed = true;
+                }
             }
             ignored.add(shard);
+            return changed;
         }

         public class UnassignedIterator implements Iterator<ShardRouting> {

@@ -685,10 +699,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
              * will be added back to unassigned once the metadata is constructed again).
              * Typically this is used when an allocation decision prevents a shard from being allocated such
              * that subsequent consumers of this API won't try to allocate this shard again.
+             *
+             * @param attempt the result of the allocation attempt
+             * @return true iff the decision caused an update to the unassigned info
              */
-            public void removeAndIgnore() {
+            public boolean removeAndIgnore(AllocationStatus attempt) {
                 innerRemove();
-                ignoreShard(current);
+                return ignoreShard(current, attempt);
             }

             private void updateShardRouting(ShardRouting shardRouting) {

@@ -721,7 +738,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             }

             /**
-             * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
+             * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore(AllocationStatus)} or
              * {@link #initialize(String, String, long)}.
              */
             @Override

@@ -747,8 +764,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {

         /**
          * Returns <code>true</code> iff any unassigned shards are marked as temporarily ignored.
-         * @see UnassignedShards#ignoreShard(ShardRouting)
-         * @see UnassignedIterator#removeAndIgnore()
+         * @see UnassignedShards#ignoreShard(ShardRouting, AllocationStatus)
+         * @see UnassignedIterator#removeAndIgnore(AllocationStatus)
          */
         public boolean isIgnoredEmpty() {
             return ignored.isEmpty();

@@ -878,6 +895,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         assert inactiveShardCount == routingNodes.inactiveShardCount :
             "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]";
+        assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]";

         return true;
     }
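The new boolean return value threads "did the unassigned info change" back to callers. A hedged sketch of the intended call pattern, with names taken from this diff:

    // Record why allocation failed; returns true only if the shard's UnassignedInfo
    // did not already carry this AllocationStatus.
    AllocationStatus status = AllocationStatus.fromDecision(decision); // e.g. DECIDERS_NO
    boolean changed = unassignedIterator.removeAndIgnore(status);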
@@ -237,10 +237,9 @@ public final class ShardRouting implements Writeable, ToXContent {
             return true;
         }

-        // unassigned info is only cleared when a shard moves to started, so
-        // for unassigned and initializing (we checked for active() before),
-        // we can safely assume it is there
-        if (unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
+        // initializing replica might not have unassignedInfo
+        assert unassignedInfo != null || (primary == false && state == ShardRoutingState.INITIALIZING);
+        if (unassignedInfo != null && unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
             return false;
         }
@@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;

@@ -36,6 +37,8 @@ import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;

 import java.io.IOException;
+import java.util.Locale;
+import java.util.Objects;

 /**
  * Holds additional information as to why the shard is in unassigned state.

@@ -105,7 +108,94 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         /**
          * Unassigned as a result of a failed primary while the replica was initializing.
         */
-        PRIMARY_FAILED;
+        PRIMARY_FAILED
     }

+    /**
+     * Captures the status of an unsuccessful allocation attempt for the shard,
+     * causing it to remain in the unassigned state.
+     *
+     * Note, ordering of the enum is important, make sure to add new values
+     * at the end and handle version serialization properly.
+     */
+    public enum AllocationStatus implements Writeable {
+        /**
+         * The shard was denied allocation to a node because the allocation deciders all returned a NO decision
+         */
+        DECIDERS_NO((byte) 0),
+        /**
+         * The shard was denied allocation to a node because there were no valid shard copies found for it;
+         * this can happen on node restart with gateway allocation
+         */
+        NO_VALID_SHARD_COPY((byte) 1),
+        /**
+         * The allocation attempt was throttled on the shard by the allocation deciders
+         */
+        DECIDERS_THROTTLED((byte) 2),
+        /**
+         * Waiting on getting shard data from all nodes before making a decision about where to allocate the shard
+         */
+        FETCHING_SHARD_DATA((byte) 3),
+        /**
+         * Allocation decision has been delayed
+         */
+        DELAYED_ALLOCATION((byte) 4),
+        /**
+         * No allocation attempt has been made yet
+         */
+        NO_ATTEMPT((byte) 5);
+
+        private final byte id;
+
+        AllocationStatus(byte id) {
+            this.id = id;
+        }
+
+        // package private for testing
+        byte getId() {
+            return id;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeByte(id);
+        }
+
+        public static AllocationStatus readFrom(StreamInput in) throws IOException {
+            byte id = in.readByte();
+            switch (id) {
+                case 0:
+                    return DECIDERS_NO;
+                case 1:
+                    return NO_VALID_SHARD_COPY;
+                case 2:
+                    return DECIDERS_THROTTLED;
+                case 3:
+                    return FETCHING_SHARD_DATA;
+                case 4:
+                    return DELAYED_ALLOCATION;
+                case 5:
+                    return NO_ATTEMPT;
+                default:
+                    throw new IllegalArgumentException("Unknown AllocationStatus value [" + id + "]");
+            }
+        }
+
+        public static AllocationStatus fromDecision(Decision decision) {
+            Objects.requireNonNull(decision);
+            switch (decision.type()) {
+                case NO:
+                    return DECIDERS_NO;
+                case THROTTLE:
+                    return DECIDERS_THROTTLED;
+                default:
+                    throw new IllegalArgumentException("no allocation attempt from decision[" + decision.type() + "]");
+            }
+        }
+
+        public String value() {
+            return toString().toLowerCase(Locale.ROOT);
+        }
+    }
+
     private final Reason reason;

@@ -115,6 +205,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
     private final String message;
     private final Exception failure;
     private final int failedAllocations;
+    private final AllocationStatus lastAllocationStatus; // result of the last allocation attempt for this shard

     /**
      * creates an UnassignedInfo object based on **current** time

@@ -123,7 +214,8 @@ public final class UnassignedInfo implements ToXContent, Writeable {
      * @param message more information about cause.
      **/
     public UnassignedInfo(Reason reason, String message) {
-        this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false);
+        this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false,
+             AllocationStatus.NO_ATTEMPT);
     }

     /**

@@ -133,16 +225,18 @@ public final class UnassignedInfo implements ToXContent, Writeable {
      * @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
      * @param unassignedTimeMillis the time of unassignment used for display in our reporting.
      * @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.
+     * @param lastAllocationStatus the result of the last allocation attempt for this shard
      */
     public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Exception failure, int failedAllocations,
-                          long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed) {
-        this.reason = reason;
+                          long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed, AllocationStatus lastAllocationStatus) {
+        this.reason = Objects.requireNonNull(reason);
         this.unassignedTimeMillis = unassignedTimeMillis;
         this.unassignedTimeNanos = unassignedTimeNanos;
         this.delayed = delayed;
         this.message = message;
         this.failure = failure;
         this.failedAllocations = failedAllocations;
+        this.lastAllocationStatus = Objects.requireNonNull(lastAllocationStatus);
         assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) :
             "failedAllocations: " + failedAllocations + " for reason " + reason;
         assert !(message == null && failure != null) : "provide a message if a failure exception is provided";

@@ -159,6 +253,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         this.message = in.readOptionalString();
         this.failure = in.readException();
         this.failedAllocations = in.readVInt();
+        this.lastAllocationStatus = AllocationStatus.readFrom(in);
     }

     public void writeTo(StreamOutput out) throws IOException {

@@ -169,6 +264,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         out.writeOptionalString(message);
         out.writeException(failure);
         out.writeVInt(failedAllocations);
+        lastAllocationStatus.writeTo(out);
     }

     public UnassignedInfo readFrom(StreamInput in) throws IOException {

@@ -240,6 +336,13 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         return message + (failure == null ? "" : ", failure " + ExceptionsHelper.detailedMessage(failure));
     }

+    /**
+     * Get the status for the last allocation attempt for this shard.
+     */
+    public AllocationStatus getLastAllocationStatus() {
+        return lastAllocationStatus;
+    }
+
     /**
      * Calculates the delay left based on current time (in nanoseconds) and the delay defined by the index settings.
      * Only relevant if shard is effectively delayed (see {@link #isDelayed()})

@@ -302,6 +405,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         if (details != null) {
             sb.append(", details[").append(details).append("]");
         }
+        sb.append(", allocation_status[").append(lastAllocationStatus.value()).append("]");
         return sb.toString();
     }

@@ -323,6 +427,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         if (details != null) {
             builder.field("details", details);
         }
+        builder.field("allocation_status", lastAllocationStatus.value());
         builder.endObject();
         return builder;
     }

@@ -353,17 +458,22 @@ public final class UnassignedInfo implements ToXContent, Writeable {
         if (message != null ? !message.equals(that.message) : that.message != null) {
             return false;
         }
+        if (lastAllocationStatus != that.lastAllocationStatus) {
+            return false;
+        }
         return !(failure != null ? !failure.equals(that.failure) : that.failure != null);
     }

     @Override
     public int hashCode() {
-        int result = reason != null ? reason.hashCode() : 0;
+        int result = reason.hashCode();
         result = 31 * result + Boolean.hashCode(delayed);
         result = 31 * result + Integer.hashCode(failedAllocations);
         result = 31 * result + Long.hashCode(unassignedTimeMillis);
         result = 31 * result + (message != null ? message.hashCode() : 0);
         result = 31 * result + (failure != null ? failure.hashCode() : 0);
+        result = 31 * result + lastAllocationStatus.hashCode();
         return result;
     }

 }
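fromDecision(...) above only accepts decisions that actually block allocation. A hedged illustration using the Decision singletons this diff touches elsewhere:

    assert AllocationStatus.fromDecision(Decision.NO) == AllocationStatus.DECIDERS_NO;
    assert AllocationStatus.fromDecision(Decision.THROTTLE) == AllocationStatus.DECIDERS_THROTTLED;
    // any other decision type throws IllegalArgumentException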
@@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;

@@ -232,7 +233,7 @@ public class AllocationService extends AbstractComponent {
             UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
             final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
             changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
-                failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false));
+                failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT));
         }
         if (!changed) {
             return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());

@@ -259,7 +260,8 @@ public class AllocationService extends AbstractComponent {
                 if (newComputedLeftDelayNanos == 0) {
                     changed = true;
                     unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
-                        unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false));
+                        unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false,
+                        unassignedInfo.getLastAllocationStatus()));
                 }
             }
         }

@@ -417,7 +419,7 @@ public class AllocationService extends AbstractComponent {
             final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
             boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
             UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
-                null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed);
+                null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT);
             applyFailedShard(allocation, shardRouting, false, unassignedInfo);
         }
         // its a dead node, remove it, note, its important to remove it *after* we apply failed shard

@@ -438,7 +440,8 @@ public class AllocationService extends AbstractComponent {
         for (ShardRouting routing : replicas) {
             changed |= applyFailedShard(allocation, routing, false,
                 new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
-                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
+                    null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false,
+                    AllocationStatus.NO_ATTEMPT));
         }
         return changed;
     }
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;

@@ -647,11 +648,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             for (int i = 0; i < primaryLength; i++) {
                 ShardRouting shard = primary[i];
                 if (!shard.primary()) {
-                    boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
-                    if (drop) {
-                        unassigned.ignoreShard(shard);
+                    final Decision decision = deciders.canAllocate(shard, allocation);
+                    if (decision.type() == Type.NO) {
+                        UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision);
+                        changed |= unassigned.ignoreShard(shard, allocationStatus);
                         while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
-                            unassigned.ignoreShard(primary[++i]);
+                            changed |= unassigned.ignoreShard(primary[++i], allocationStatus);
                         }
                         continue;
                     } else {

@@ -701,9 +703,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                         final int minNodeHigh = minNode.highestPrimary(shard.getIndexName());
                         if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
                                 || (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
-                            minNode = node;
-                            minWeight = currentWeight;
-                            decision = currentDecision;
+                            // nothing to set here; the minNode, minWeight, and decision get set below
                         } else {
                             break NOUPDATE;
                         }

@@ -719,7 +719,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                     }
                 }
             }
-            assert decision != null && minNode != null || decision == null && minNode == null;
+            assert (decision == null) == (minNode == null);
             if (minNode != null) {
                 final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
                     ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);

@@ -735,10 +735,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
                 } else {
                     minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
                     final RoutingNode node = minNode.getRoutingNode();
-                    if (deciders.canAllocate(node, allocation).type() != Type.YES) {
+                    final Decision.Type nodeLevelDecision = deciders.canAllocate(node, allocation).type();
+                    if (nodeLevelDecision != Type.YES) {
                         if (logger.isTraceEnabled()) {
                             logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type());
                         }
+                        assert nodeLevelDecision == Type.NO;
                         throttledNodes.add(minNode);
                     }
                 }

@@ -748,10 +750,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
             } else if (logger.isTraceEnabled()) {
                 logger.trace("No Node found to assign shard [{}]", shard);
             }
-            unassigned.ignoreShard(shard);
+            assert decision == null || decision.type() == Type.THROTTLE;
+            UnassignedInfo.AllocationStatus allocationStatus =
+                decision == null ? UnassignedInfo.AllocationStatus.DECIDERS_NO :
+                                   UnassignedInfo.AllocationStatus.fromDecision(decision);
+            changed |= unassigned.ignoreShard(shard, allocationStatus);
             if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
                 while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
-                    unassigned.ignoreShard(secondary[--secondaryLength]);
+                    changed |= unassigned.ignoreShard(secondary[--secondaryLength], allocationStatus);
                 }
             }
         }
@@ -39,8 +39,6 @@ import org.elasticsearch.common.xcontent.XContentParser;

 import java.io.IOException;
-import java.util.Objects;
-import java.util.function.Consumer;
 import java.util.function.Function;

 /**
  * Abstract base class for allocating an unassigned shard to a node
@@ -125,7 +125,8 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
             // we need to move the unassigned info back to treat it as if it was index creation
             unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
                 "force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
-                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false);
+                shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false,
+                shardRouting.unassignedInfo().getLastAllocationStatus());
         }

         initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);
@@ -19,6 +19,7 @@

 package org.elasticsearch.cluster.routing.allocation.decider;

+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;

@@ -29,11 +30,12 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
+import java.util.Objects;

 /**
  * This abstract class defines the basic {@link Decision} used during the shard
  * allocation process.
  *
  * @see AllocationDecider
  */
 public abstract class Decision implements ToXContent {

@@ -44,7 +46,7 @@ public abstract class Decision implements ToXContent {
     public static final Decision THROTTLE = new Single(Type.THROTTLE);

     /**
      * Creates a simple decision
      * @param type {@link Type} of the decision
      * @param label label for the Decider that produced this decision
      * @param explanation explanation of the decision

@@ -95,10 +97,10 @@ public abstract class Decision implements ToXContent {
     }

     /**
      * This enumeration defines the
      * possible types of decisions
      */
-    public static enum Type {
+    public enum Type {
         YES,
         NO,
         THROTTLE;

@@ -144,6 +146,7 @@ public abstract class Decision implements ToXContent {
      */
     public abstract Type type();

+    @Nullable
     public abstract String label();

     /**

@@ -166,7 +169,7 @@ public abstract class Decision implements ToXContent {
         }

         /**
          * Creates a new {@link Single} decision of a given type
          * @param type {@link Type} of the decision
          */
         public Single(Type type) {

@@ -175,12 +178,12 @@ public abstract class Decision implements ToXContent {

         /**
          * Creates a new {@link Single} decision of a given type
          *
          * @param type {@link Type} of the decision
          * @param explanation An explanation of this {@link Decision}
          * @param explanationParams A set of additional parameters
          */
-        public Single(Type type, String label, String explanation, Object... explanationParams) {
+        public Single(Type type, @Nullable String label, @Nullable String explanation, @Nullable Object... explanationParams) {
             this.type = type;
             this.label = label;
             this.explanation = explanation;

@@ -193,6 +196,7 @@ public abstract class Decision implements ToXContent {
         }

         @Override
+        @Nullable
         public String label() {
             return this.label;
         }

@@ -205,6 +209,7 @@ public abstract class Decision implements ToXContent {
         /**
          * Returns the explanation string, fully formatted. Only formats the string once
         */
+        @Nullable
         public String getExplanation() {
             if (explanationString == null && explanation != null) {
                 explanationString = String.format(Locale.ROOT, explanation, explanationParams);

@@ -224,15 +229,16 @@ public abstract class Decision implements ToXContent {

             Decision.Single s = (Decision.Single) object;
             return this.type == s.type &&
-                   this.label.equals(s.label) &&
-                   this.getExplanation().equals(s.getExplanation());
+                   Objects.equals(label, s.label) &&
+                   Objects.equals(getExplanation(), s.getExplanation());
         }

         @Override
         public int hashCode() {
-            int result = this.type.hashCode();
-            result = 31 * result + this.label.hashCode();
-            result = 31 * result + this.getExplanation().hashCode();
+            int result = type.hashCode();
+            result = 31 * result + (label == null ? 0 : label.hashCode());
+            String explanationStr = getExplanation();
+            result = 31 * result + (explanationStr == null ? 0 : explanationStr.hashCode());
             return result;
         }

@@ -288,6 +294,7 @@ public abstract class Decision implements ToXContent {
         }

         @Override
+        @Nullable
         public String label() {
             // Multi decisions have no labels
             return null;
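The switch to Objects.equals(...) above is what makes label-less decisions comparable. A hedged illustration:

    Decision a = new Decision.Single(Decision.Type.YES); // label and explanation are null
    Decision b = new Decision.Single(Decision.Type.YES);
    // before this change a.equals(b) hit a NullPointerException on label.equals(...);
    // with Objects.equals(null, null) == true, a.equals(b) now returns true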
@@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation.decider;

 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;

@@ -177,7 +178,12 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
         if (shardRouting.unassigned()) {
             initializingShard = shardRouting.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
         } else if (shardRouting.initializing()) {
-            initializingShard = shardRouting.moveToUnassigned(shardRouting.unassignedInfo())
+            UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
+            if (unassignedInfo == null) {
+                // unassigned shards must have unassignedInfo (initializing shards might not)
+                unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "fake");
+            }
+            initializingShard = shardRouting.moveToUnassigned(unassignedInfo)
                 .initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
         } else if (shardRouting.relocating()) {
             initializingShard = shardRouting.cancelRelocation()
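The inline comment above captures the invariant being enforced: a shard moved to unassigned must carry an UnassignedInfo, while a still-initializing shard may never have had one, so a placeholder is synthesized. A tiny sketch of that defaulting pattern (names are illustrative, not the Elasticsearch API):

// Synthesize a placeholder reason when the source object may legitimately
// lack one, so downstream code can rely on it being non-null.
final class UnassignedReason {
    final String message;

    UnassignedReason(String message) {
        this.message = message;
    }

    static UnassignedReason orPlaceholder(UnassignedReason existing) {
        // initializing shards may lack a reason; unassigned shards must not
        return existing != null ? existing : new UnassignedReason("placeholder");
    }
}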
@@ -84,9 +84,6 @@ import java.util.stream.Collectors;

 import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;

-/**
- *
- */
 public class ClusterService extends AbstractLifecycleComponent {

     public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =

@@ -348,6 +345,7 @@ public class ClusterService extends AbstractLifecycleComponent {
      * @param source the source of the cluster state update task
      * @param updateTask the full context for the cluster state update
      *                   task
+     *
      */
     public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
         submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);

@@ -371,6 +369,7 @@ public class ClusterService extends AbstractLifecycleComponent {
      * @param listener callback after the cluster state update task
      *                 completes
      * @param <T> the type of the cluster state update task state
+     *
      */
     public <T> void submitStateUpdateTask(final String source, final T task,
                                           final ClusterStateTaskConfig config,

@@ -390,6 +389,7 @@ public class ClusterService extends AbstractLifecycleComponent {
      *                 that share the same executor will be executed
      *                 batches on this executor
      * @param <T> the type of the cluster state update task state
+     *
      */
     public <T> void submitStateUpdateTasks(final String source,
                                            final Map<T, ClusterStateTaskListener> tasks, final ClusterStateTaskConfig config,

@@ -411,7 +411,8 @@ public class ClusterService extends AbstractLifecycleComponent {
             List<UpdateTask> existingTasks = updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>());
             for (@SuppressWarnings("unchecked") UpdateTask<T> existing : existingTasks) {
                 if (tasksIdentity.containsKey(existing.task)) {
-                    throw new IllegalArgumentException("task [" + existing.task + "] is already queued");
+                    throw new IllegalStateException("task [" + executor.describeTasks(Collections.singletonList(existing.task)) +
+                        "] with source [" + source + "] is already queued");
                 }
             }
             existingTasks.addAll(updateTasks);

@@ -517,11 +518,11 @@ public class ClusterService extends AbstractLifecycleComponent {
         if (pending != null) {
             for (UpdateTask<T> task : pending) {
                 if (task.processed.getAndSet(true) == false) {
-                    logger.trace("will process [{}[{}]]", task.source, task.task);
+                    logger.trace("will process {}", task.toString(executor));
                     toExecute.add(task);
                     processTasksBySource.computeIfAbsent(task.source, s -> new ArrayList<>()).add(task.task);
                 } else {
-                    logger.trace("skipping [{}[{}]], already processed", task.source, task.task);
+                    logger.trace("skipping {}, already processed", task.toString(executor));
                 }
             }
         }

@@ -571,7 +572,8 @@ public class ClusterService extends AbstractLifecycleComponent {
             assert (assertsEnabled = true);
             if (assertsEnabled) {
                 for (UpdateTask<T> updateTask : toExecute) {
-                    assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]";
+                    assert batchResult.executionResults.containsKey(updateTask.task) :
+                        "missing task result for " + updateTask.toString(executor);
                 }
             }

@@ -579,13 +581,13 @@ public class ClusterService extends AbstractLifecycleComponent {
         final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
         // fail all tasks that have failed and extract those that are waiting for results
         for (UpdateTask<T> updateTask : toExecute) {
-            assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
+            assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.toString(executor);
             final ClusterStateTaskExecutor.TaskResult executionResult =
                 batchResult.executionResults.get(updateTask.task);
             executionResult.handle(
                 () -> proccessedListeners.add(updateTask),
                 ex -> {
-                    logger.debug("cluster state update task [{}] failed", ex, updateTask.source);
+                    logger.debug("cluster state update task {} failed", ex, updateTask.toString(executor));
                     updateTask.listener.onFailure(updateTask.source, ex);
                 }
             );

@@ -854,6 +856,15 @@ public class ClusterService extends AbstractLifecycleComponent {
         public void run() {
             runTasksForExecutor(executor);
         }
+
+        public String toString(ClusterStateTaskExecutor<T> executor) {
+            String taskDescription = executor.describeTasks(Collections.singletonList(task));
+            if (taskDescription.isEmpty()) {
+                return "[" + source + "]";
+            } else {
+                return "[" + source + "[" + taskDescription + "]]";
+            }
+        }
     }

     private void warnAboutSlowTaskIfNeeded(TimeValue executionTime, String source) {
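The new UpdateTask.toString(executor) centralizes how queued tasks appear in logs and assertions: the executor's describeTasks(...) output is wrapped in the task's source, falling back to the bare source when the description is empty. A self-contained sketch of that formatting (the interface and class names below are illustrative, not the Elasticsearch API):

import java.util.Collections;
import java.util.List;

// The executor knows how to describe its own tasks; an empty string means
// the task has no useful description.
interface TaskDescriber<T> {
    String describeTasks(List<T> tasks);
}

final class QueuedTask<T> {
    final String source;
    final T task;

    QueuedTask(String source, T task) {
        this.source = source;
        this.task = task;
    }

    // "[source]" when the description is empty, "[source[description]]" otherwise,
    // matching the format of the toString(executor) added above.
    String toString(TaskDescriber<T> describer) {
        String description = describer.describeTasks(Collections.singletonList(task));
        return description.isEmpty()
            ? "[" + source + "]"
            : "[" + source + "[" + description + "]]";
    }
}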
@@ -16,6 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
+
 package org.elasticsearch.common;

 import org.elasticsearch.common.io.stream.StreamInput;

@@ -23,94 +24,48 @@ import org.elasticsearch.common.io.stream.StreamOutput;

 import java.io.IOException;

-/**
- *
- */
-public final class Priority implements Comparable<Priority> {
+public enum Priority {
+
+    IMMEDIATE((byte) 0),
+    URGENT((byte) 1),
+    HIGH((byte) 2),
+    NORMAL((byte) 3),
+    LOW((byte) 4),
+    LANGUID((byte) 5);

     public static Priority readFrom(StreamInput input) throws IOException {
         return fromByte(input.readByte());
     }

     public static void writeTo(Priority priority, StreamOutput output) throws IOException {
-        byte b = priority.value;
-        output.writeByte(b);
+        output.writeByte(priority.value);
     }

     public static Priority fromByte(byte b) {
         switch (b) {
-            case -1: return IMMEDIATE;
-            case 0: return URGENT;
-            case 1: return HIGH;
-            case 2: return NORMAL;
-            case 3: return LOW;
-            case 4: return LANGUID;
+            case 0: return IMMEDIATE;
+            case 1: return URGENT;
+            case 2: return HIGH;
+            case 3: return NORMAL;
+            case 4: return LOW;
+            case 5: return LANGUID;
             default:
                 throw new IllegalArgumentException("can't find priority for [" + b + "]");
         }
     }

-    public static final Priority IMMEDIATE = new Priority((byte) -1);
-    public static final Priority URGENT = new Priority((byte) 0);
-    public static final Priority HIGH = new Priority((byte) 1);
-    public static final Priority NORMAL = new Priority((byte) 2);
-    public static final Priority LOW = new Priority((byte) 3);
-    public static final Priority LANGUID = new Priority((byte) 4);
-    private static final Priority[] values = new Priority[] { IMMEDIATE, URGENT, HIGH, NORMAL, LOW, LANGUID };
-
     private final byte value;

-    private Priority(byte value) {
+    Priority(byte value) {
         this.value = value;
     }

-    /**
-     * @return an array of all available priorities, sorted from the highest to the lowest.
-     */
-    public static Priority[] values() {
-        return values;
-    }
-
-    @Override
-    public int compareTo(Priority p) {
-        return (this.value < p.value) ? -1 : ((this.value > p.value) ? 1 : 0);
-    }
-
     public boolean after(Priority p) {
-        return value > p.value;
+        return this.compareTo(p) > 0;
     }

     public boolean sameOrAfter(Priority p) {
-        return value >= p.value;
+        return this.compareTo(p) >= 0;
     }

-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || Priority.class != o.getClass()) return false;
-
-        Priority priority = (Priority) o;
-
-        if (value != priority.value) return false;
-
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        return (int) value;
-    }
-
-    @Override
-    public String toString() {
-        switch (value) {
-            case (byte) -1: return "IMMEDIATE";
-            case (byte) 0: return "URGENT";
-            case (byte) 1: return "HIGH";
-            case (byte) 2: return "NORMAL";
-            case (byte) 3: return "LOW";
-            default:
-                return "LANGUID";
-        }
-    }
 }
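The rewrite keeps Priority's wire format on an explicit byte per constant rather than on ordinal(), so reordering declarations cannot silently change what goes over the wire; note, though, that after() and sameOrAfter() now delegate to the enum's compareTo, which orders by declaration, so the constants must stay declared from highest to lowest priority. A standalone sketch of the pattern (Level is illustrative, not the Elasticsearch class):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

// An enum that serializes through an explicit byte instead of ordinal(),
// decoupling the wire format from declaration order.
enum Level {
    HIGH((byte) 0),
    NORMAL((byte) 1),
    LOW((byte) 2);

    private final byte value;

    Level(byte value) {
        this.value = value;
    }

    void writeTo(ByteArrayOutputStream out) {
        out.write(value);
    }

    static Level readFrom(ByteArrayInputStream in) throws IOException {
        int b = in.read();
        for (Level level : values()) {
            if (level.value == b) {
                return level;
            }
        }
        throw new IOException("can't find level for [" + b + "]");
    }
}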
@@ -99,35 +99,6 @@ public interface BlobContainer {
      */
     void deleteBlob(String blobName) throws IOException;

-    /**
-     * Deletes blobs with the given names. If any subset of the names do not exist in the container, this method has no
-     * effect for those names, and will delete the blobs for those names that do exist. If any of the blobs failed
-     * to delete, those blobs that were processed before it and successfully deleted will remain deleted. An exception
-     * is thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
-     * all the failed deletes into a single IOException instead?)
-     *
-     * TODO: remove, see https://github.com/elastic/elasticsearch/issues/18529
-     *
-     * @param blobNames
-     *          The collection of blob names to delete from the container.
-     * @throws IOException if any of the blobs in the collection exists but could not be deleted.
-     */
-    void deleteBlobs(Collection<String> blobNames) throws IOException;
-
-    /**
-     * Deletes all blobs in the container that match the specified prefix. If any of the blobs failed to delete,
-     * those blobs that were processed before it and successfully deleted will remain deleted. An exception is
-     * thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
-     * all the failed deletes into a single IOException instead?)
-     *
-     * TODO: remove, see: https://github.com/elastic/elasticsearch/issues/18529
-     *
-     * @param blobNamePrefix
-     *          The prefix to match against blob names in the container. Any blob whose name has the prefix will be deleted.
-     * @throws IOException if any of the matching blobs failed to delete.
-     */
-    void deleteBlobsByPrefix(String blobNamePrefix) throws IOException;
-
     /**
      * Lists all blobs in the container.
      *
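The removed javadoc leaves open whether a batch delete should stop at the first failure or collect every failure into a single IOException. A sketch of the collect-all alternative the TODO asks about, using suppressed exceptions (BlobStore and BatchDeletes below are illustrative, not the Elasticsearch API):

import java.io.IOException;
import java.util.Collection;

interface BlobStore {
    void deleteBlob(String blobName) throws IOException;
}

final class BatchDeletes {
    // Attempt every delete, then surface all failures as one IOException
    // with the individual causes attached as suppressed exceptions.
    static void deleteAll(BlobStore store, Collection<String> blobNames) throws IOException {
        IOException failures = null;
        for (String blobName : blobNames) {
            try {
                store.deleteBlob(blobName);
            } catch (IOException e) {
                if (failures == null) {
                    failures = new IOException("failed to delete one or more blobs");
                }
                failures.addSuppressed(e);
            }
        }
        if (failures != null) {
            throw failures;
        }
    }
}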
@@ -45,21 +45,6 @@ public abstract class AbstractBlobContainer implements BlobContainer {
         return this.path;
     }

-    @Override
-    public void deleteBlobsByPrefix(final String blobNamePrefix) throws IOException {
-        Map<String, BlobMetaData> blobs = listBlobsByPrefix(blobNamePrefix);
-        for (BlobMetaData blob : blobs.values()) {
-            deleteBlob(blob.name());
-        }
-    }
-
-    @Override
-    public void deleteBlobs(Collection<String> blobNames) throws IOException {
-        for (String blob: blobNames) {
-            deleteBlob(blob);
-        }
-    }
-
     @Override
     public void writeBlob(String blobName, BytesReference bytes) throws IOException {
         try (InputStream stream = bytes.streamInput()) {
@@ -19,11 +19,9 @@

 package org.elasticsearch.common.compress;

-import org.apache.lucene.store.IndexInput;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.jboss.netty.buffer.ChannelBuffer;

 import java.io.IOException;
@@ -20,6 +20,7 @@ import java.util.ArrayList;
 import java.util.Collection;

 import org.apache.lucene.spatial.geopoint.document.GeoPointField;
+import org.apache.lucene.geo.Rectangle;
 import org.apache.lucene.util.BitUtil;

 /**

@@ -176,6 +177,26 @@ public class GeoHashUtils {
         return BASE_32[((x & 1) + ((y & 1) * 2) + ((x & 2) * 2) + ((y & 2) * 4) + ((x & 4) * 4)) % 32];
     }

+    /**
+     * Computes the bounding box coordinates from a given geohash
+     *
+     * @param geohash Geohash of the defined cell
+     * @return GeoRect rectangle defining the bounding box
+     */
+    public static Rectangle bbox(final String geohash) {
+        // bottom left is the coordinate
+        GeoPoint bottomLeft = GeoPoint.fromGeohash(geohash);
+        long ghLong = longEncode(geohash);
+        // shift away the level
+        ghLong >>>= 4;
+        // deinterleave and add 1 to lat and lon to get topRight
+        long lat = BitUtil.deinterleave(ghLong >>> 1) + 1;
+        long lon = BitUtil.deinterleave(ghLong) + 1;
+        GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | geohash.length());
+
+        return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon());
+    }
+
     /**
      * Calculate all neighbors of a given geohash cell.
      *
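The new bbox() works on the Morton-interleaved form of the geohash: it deinterleaves the packed lat/lon cell indices, adds one to each to step to the neighboring cell to the north-east, re-interleaves, and decodes that as the top-right corner. A standalone sketch of the interleave/deinterleave bit-twiddling this relies on (simplified to 16-bit inputs packed into an int; Lucene's BitUtil applies the same idea to longs):

final class Morton {
    // Spread the low 16 bits of v so they occupy the even bit positions.
    static int part(int v) {
        v &= 0xFFFF;
        v = (v | (v << 8)) & 0x00FF00FF;
        v = (v | (v << 4)) & 0x0F0F0F0F;
        v = (v | (v << 2)) & 0x33333333;
        v = (v | (v << 1)) & 0x55555555;
        return v;
    }

    // Interleave x into even bits and y into odd bits: y15 x15 ... y0 x0.
    static int interleave(int x, int y) {
        return part(x) | (part(y) << 1);
    }

    // Inverse of part(): compact the even bits of v back into 16 bits.
    static int compact(int v) {
        v &= 0x55555555;
        v = (v | (v >>> 1)) & 0x33333333;
        v = (v | (v >>> 2)) & 0x0F0F0F0F;
        v = (v | (v >>> 4)) & 0x00FF00FF;
        v = (v | (v >>> 8)) & 0x0000FFFF;
        return v;
    }

    static int deinterleaveEven(int v) { return compact(v); }        // e.g. lon
    static int deinterleaveOdd(int v)  { return compact(v >>> 1); }  // e.g. lat
}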
@@ -38,12 +38,6 @@ import java.util.Map;
  * An injector can also {@link #injectMembers(Object) inject the dependencies} of
  * already-constructed instances. This can be used to interoperate with objects created by other
  * frameworks or services.
- * <p>
- * Injectors can be {@link #createChildInjector(Iterable) hierarchical}. Child injectors inherit
- * the configuration of their parent injectors, but the converse does not hold.
- * <p>
- * The injector's {@link #getBindings() internal bindings} are available for introspection. This
- * enables tools and extensions to operate on an injector reflectively.
  *
  * @author crazybob@google.com (Bob Lee)
  * @author jessewilson@google.com (Jesse Wilson)

@@ -87,41 +81,6 @@ public interface Injector {
      */
     <T> MembersInjector<T> getMembersInjector(Class<T> type);

-    /**
-     * Returns all explicit bindings.
-     * <p>
-     * The returned map does not include bindings inherited from a {@link #getParent() parent
-     * injector}, should one exist. The returned map is guaranteed to iterate (for example, with
-     * its {@link java.util.Map#entrySet()} iterator) in the order of insertion. In other words,
-     * the order in which bindings appear in user Modules.
-     * <p>
-     * This method is part of the Guice SPI and is intended for use by tools and extensions.
-     */
-    Map<Key<?>, Binding<?>> getBindings();
-
-    /**
-     * Returns the binding for the given injection key. This will be an explicit bindings if the key
-     * was bound explicitly by a module, or an implicit binding otherwise. The implicit binding will
-     * be created if necessary.
-     * <p>
-     * This method is part of the Guice SPI and is intended for use by tools and extensions.
-     *
-     * @throws ConfigurationException if this injector cannot find or create the binding.
-     */
-    <T> Binding<T> getBinding(Key<T> key);
-
-    /**
-     * Returns the binding for the given type. This will be an explicit bindings if the injection key
-     * was bound explicitly by a module, or an implicit binding otherwise. The implicit binding will
-     * be created if necessary.
-     * <p>
-     * This method is part of the Guice SPI and is intended for use by tools and extensions.
-     *
-     * @throws ConfigurationException if this injector cannot find or create the binding.
-     * @since 2.0
-     */
-    <T> Binding<T> getBinding(Class<T> type);
-
     /**
      * Returns all explicit bindings for {@code type}.
      * <p>

@@ -166,45 +125,4 @@ public interface Injector {
      * @throws ProvisionException if there was a runtime failure while providing an instance.
      */
     <T> T getInstance(Class<T> type);
-
-    /**
-     * Returns this injector's parent, or {@code null} if this is a top-level injector.
-     *
-     * @since 2.0
-     */
-    Injector getParent();
-
-    /**
-     * Returns a new injector that inherits all state from this injector. All bindings, scopes,
-     * interceptors and type converters are inherited -- they are visible to the child injector.
-     * Elements of the child injector are not visible to its parent.
-     * <p>
-     * Just-in-time bindings created for child injectors will be created in an ancestor injector
-     * whenever possible. This allows for scoped instances to be shared between injectors. Use
-     * explicit bindings to prevent bindings from being shared with the parent injector.
-     * <p>
-     * No key may be bound by both an injector and one of its ancestors. This includes just-in-time
-     * bindings. The lone exception is the key for {@code Injector.class}, which is bound by each
-     * injector to itself.
-     *
-     * @since 2.0
-     */
-    Injector createChildInjector(Iterable<? extends Module> modules);
-
-    /**
-     * Returns a new injector that inherits all state from this injector. All bindings, scopes,
-     * interceptors and type converters are inherited -- they are visible to the child injector.
-     * Elements of the child injector are not visible to its parent.
-     * <p>
-     * Just-in-time bindings created for child injectors will be created in an ancestor injector
-     * whenever possible. This allows for scoped instances to be shared between injectors. Use
-     * explicit bindings to prevent bindings from being shared with the parent injector.
-     * <p>
-     * No key may be bound by both an injector and one of its ancestors. This includes just-in-time
-     * bindings. The lone exception is the key for {@code Injector.class}, which is bound by each
-     * injector to itself.
-     *
-     * @since 2.0
-     */
-    Injector createChildInjector(Module... modules);
 }
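The removed javadoc describes the hierarchy rule these deletions retire: a child injector sees its ancestors' bindings, but the converse does not hold. A minimal sketch of that parent-first lookup rule (ScopedRegistry is illustrative, not the Guice or Elasticsearch API):

import java.util.HashMap;
import java.util.Map;

final class ScopedRegistry {
    private final ScopedRegistry parent; // null for the top-level registry
    private final Map<String, Object> bindings = new HashMap<>();

    ScopedRegistry(ScopedRegistry parent) {
        this.parent = parent;
    }

    void bind(String key, Object value) {
        bindings.put(key, value);
    }

    // Child registries see their ancestors' bindings by walking up the
    // chain; ancestors never see a child's bindings.
    Object lookup(String key) {
        for (ScopedRegistry r = this; r != null; r = r.parent) {
            Object value = r.bindings.get(key);
            if (value != null) {
                return value;
            }
        }
        return null;
    }
}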
@@ -74,15 +74,6 @@ class InjectorBuilder {
         return this;
     }

-    /**
-     * Sets the parent of the injector to-be-constructed. As a side effect, this sets this injector's
-     * stage to the stage of {@code parent}.
-     */
-    InjectorBuilder parentInjector(InjectorImpl parent) {
-        shellBuilder.parent(parent);
-        return stage(parent.getInstance(Stage.class));
-    }
-
     InjectorBuilder addModules(Iterable<? extends Module> modules) {
         shellBuilder.addModules(modules);
         return this;

@@ -102,11 +93,6 @@ class InjectorBuilder {
             initializeStatically();
         }

-        // If we're in the tool stage, stop here. Don't eagerly inject or load anything.
-        if (stage == Stage.TOOL) {
-            return new ToolStageInjector(primaryInjector());
-        }
-
         injectDynamically();

         return primaryInjector();

@@ -217,92 +203,4 @@ class InjectorBuilder {
             }
         }
     }
-
-    /**
-     * {@link Injector} exposed to users in {@link Stage#TOOL}.
-     */
-    static class ToolStageInjector implements Injector {
-        private final Injector delegateInjector;
-
-        ToolStageInjector(Injector delegateInjector) {
-            this.delegateInjector = delegateInjector;
-        }
-
-        @Override
-        public void injectMembers(Object o) {
-            throw new UnsupportedOperationException(
-                    "Injector.injectMembers(Object) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public Map<Key<?>, Binding<?>> getBindings() {
-            return this.delegateInjector.getBindings();
-        }
-
-        @Override
-        public <T> Binding<T> getBinding(Key<T> key) {
-            return this.delegateInjector.getBinding(key);
-        }
-
-        @Override
-        public <T> Binding<T> getBinding(Class<T> type) {
-            return this.delegateInjector.getBinding(type);
-        }
-
-        @Override
-        public <T> List<Binding<T>> findBindingsByType(TypeLiteral<T> type) {
-            return this.delegateInjector.findBindingsByType(type);
-        }
-
-        @Override
-        public Injector getParent() {
-            return delegateInjector.getParent();
-        }
-
-        @Override
-        public Injector createChildInjector(Iterable<? extends Module> modules) {
-            return delegateInjector.createChildInjector(modules);
-        }
-
-        @Override
-        public Injector createChildInjector(Module... modules) {
-            return delegateInjector.createChildInjector(modules);
-        }
-
-        @Override
-        public <T> Provider<T> getProvider(Key<T> key) {
-            throw new UnsupportedOperationException(
-                    "Injector.getProvider(Key<T>) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public <T> Provider<T> getProvider(Class<T> type) {
-            throw new UnsupportedOperationException(
-                    "Injector.getProvider(Class<T>) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public <T> MembersInjector<T> getMembersInjector(TypeLiteral<T> typeLiteral) {
-            throw new UnsupportedOperationException(
-                    "Injector.getMembersInjector(TypeLiteral<T>) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public <T> MembersInjector<T> getMembersInjector(Class<T> type) {
-            throw new UnsupportedOperationException(
-                    "Injector.getMembersInjector(Class<T>) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public <T> T getInstance(Key<T> key) {
-            throw new UnsupportedOperationException(
-                    "Injector.getInstance(Key<T>) is not supported in Stage.TOOL");
-        }
-
-        @Override
-        public <T> T getInstance(Class<T> type) {
-            throw new UnsupportedOperationException(
-                    "Injector.getInstance(Class<T>) is not supported in Stage.TOOL");
-        }
-    }
 }
@@ -64,7 +64,6 @@ import static org.elasticsearch.common.inject.internal.Annotations.findScopeAnnotation
  */
 class InjectorImpl implements Injector, Lookups {
     final State state;
-    final InjectorImpl parent;
     boolean readOnly;
     BindingsMultimap bindingsMultimap = new BindingsMultimap();
     final Initializer initializer;

@@ -76,16 +75,10 @@ class InjectorImpl implements Injector, Lookups {

     Lookups lookups = new DeferredLookups(this);

-    InjectorImpl(@Nullable InjectorImpl parent, State state, Initializer initializer) {
-        this.parent = parent;
+    InjectorImpl(State state, Initializer initializer) {
         this.state = state;
         this.initializer = initializer;
-
-        if (parent != null) {
-            localContext = parent.localContext;
-        } else {
-            localContext = new ThreadLocal<>();
-        }
+        localContext = new ThreadLocal<>();
     }

     /**

@@ -106,21 +99,6 @@ class InjectorImpl implements Injector, Lookups {
         return bindingsMultimap.getAll(type);
     }

-    /**
-     * Returns the binding for {@code key}
-     */
-    @Override
-    public <T> BindingImpl<T> getBinding(Key<T> key) {
-        Errors errors = new Errors(key);
-        try {
-            BindingImpl<T> result = getBindingOrThrow(key, errors);
-            errors.throwConfigurationExceptionIfErrorsExist();
-            return result;
-        } catch (ErrorsException e) {
-            throw new ConfigurationException(errors.merge(e.getErrors()).getMessages());
-        }
-    }
-
     /**
      * Gets a binding implementation. First, it check to see if the parent has a binding. If the
      * parent has a binding and the binding is scoped, it will use that binding. Otherwise, this

@@ -139,29 +117,6 @@ class InjectorImpl implements Injector, Lookups {
         return getJustInTimeBinding(key, errors);
     }

-    @Override
-    public <T> Binding<T> getBinding(Class<T> type) {
-        return getBinding(Key.get(type));
-    }
-
-    @Override
-    public Injector getParent() {
-        return parent;
-    }
-
-    @Override
-    public Injector createChildInjector(Iterable<? extends Module> modules) {
-        return new InjectorBuilder()
-            .parentInjector(this)
-            .addModules(modules)
-            .build();
-    }
-
-    @Override
-    public Injector createChildInjector(Module... modules) {
-        return createChildInjector(Arrays.asList(modules));
-    }
-
     /**
      * Returns a just-in-time binding for {@code key}, creating it if necessary.
      *

@@ -171,13 +126,11 @@ class InjectorImpl implements Injector, Lookups {
             throws ErrorsException {
         synchronized (state.lock()) {
             // first try to find a JIT binding that we've already created
-            for (InjectorImpl injector = this; injector != null; injector = injector.parent) {
-                @SuppressWarnings("unchecked") // we only store bindings that match their key
-                BindingImpl<T> binding = (BindingImpl<T>) injector.jitBindings.get(key);
-
-                if (binding != null) {
-                    return binding;
-                }
-            }
+            @SuppressWarnings("unchecked") // we only store bindings that match their key
+            BindingImpl<T> binding = (BindingImpl<T>) jitBindings.get(key);
+
+            if (binding != null) {
+                return binding;
+            }

             return createJustInTimeBindingRecursive(key, errors);

@@ -600,14 +553,6 @@ class InjectorImpl implements Injector, Lookups {
      */
     private <T> BindingImpl<T> createJustInTimeBindingRecursive(Key<T> key, Errors errors)
             throws ErrorsException {
-        // ask the parent to create the JIT binding
-        if (parent != null && !parent.readOnly /* ES: don't check on parent if its read only, its already created all the bindings it can*/) {
-            try {
-                return parent.createJustInTimeBindingRecursive(key, new Errors());
-            } catch (ErrorsException ignored) {
-            }
-        }
-
         if (state.isBlacklisted(key)) {
             throw errors.childBindingAlreadySet(key).toException();
         }

@@ -686,12 +631,6 @@ class InjectorImpl implements Injector, Lookups {
         return getBindingOrThrow(key, errors).getInternalFactory();
     }

-    // not test-covered
-    @Override
-    public Map<Key<?>, Binding<?>> getBindings() {
-        return state.getExplicitBindingsThisLevel();
-    }
-
     private static class BindingsMultimap {
         final Map<TypeLiteral<?>, List<Binding<?>>> multimap = new HashMap<>();
@@ -50,18 +50,12 @@ class InjectorShell {

     private final List<Element> elements;
     private final InjectorImpl injector;
-    private final PrivateElements privateElements;

-    private InjectorShell(Builder builder, List<Element> elements, InjectorImpl injector) {
-        this.privateElements = builder.privateElements;
+    private InjectorShell(List<Element> elements, InjectorImpl injector) {
         this.elements = elements;
         this.injector = injector;
     }

-    PrivateElements getPrivateElements() {
-        return privateElements;
-    }
-
     InjectorImpl getInjector() {
         return injector;
     }

@@ -134,7 +128,7 @@ class InjectorShell {
             throw new IllegalStateException("no state. Did you remember to lock() ?");
         }

-        InjectorImpl injector = new InjectorImpl(parent, state, initializer);
+        InjectorImpl injector = new InjectorImpl(state, initializer);
         if (privateElements != null) {
             privateElements.initInjector(injector);
         }

@@ -167,7 +161,7 @@ class InjectorShell {
         stopwatch.resetAndLog("Binding creation");

         List<InjectorShell> injectorShells = new ArrayList<>();
-        injectorShells.add(new InjectorShell(this, elements, injector));
+        injectorShells.add(new InjectorShell(elements, injector));

         // recursively build child shells
         PrivateElementProcessor processor = new PrivateElementProcessor(errors, stage);
Some files were not shown because too many files have changed in this diff.