Merge branch 'master' into pr/s3-path-style-access

This commit is contained in:
David Pilato 2016-07-19 12:55:25 +02:00
commit c6c5a1b7c8
681 changed files with 11372 additions and 7873 deletions

View File: build.gradle (root project)

@ -173,6 +173,11 @@ subprojects {
"org.elasticsearch.distribution.rpm:elasticsearch:${version}": ':distribution:rpm',
"org.elasticsearch.distribution.deb:elasticsearch:${version}": ':distribution:deb',
"org.elasticsearch.test:logger-usage:${version}": ':test:logger-usage',
// for transport client
"org.elasticsearch.plugin:transport-netty3-client:${version}": ':modules:transport-netty3',
"org.elasticsearch.plugin:reindex-client:${version}": ':modules:reindex',
"org.elasticsearch.plugin:lang-mustache-client:${version}": ':modules:lang-mustache',
"org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator',
]
configurations.all {
resolutionStrategy.dependencySubstitution { DependencySubstitutions subs ->

View File: BuildPlugin.groovy

@ -19,7 +19,6 @@
package org.elasticsearch.gradle
import nebula.plugin.extraconfigurations.ProvidedBasePlugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.GradleException
import org.gradle.api.JavaVersion
@ -35,6 +34,7 @@ import org.gradle.api.artifacts.ResolvedArtifact
import org.gradle.api.artifacts.dsl.RepositoryHandler
import org.gradle.api.artifacts.maven.MavenPom
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.publish.maven.tasks.GenerateMavenPom
import org.gradle.api.tasks.bundling.Jar
import org.gradle.api.tasks.compile.JavaCompile
@ -344,7 +344,7 @@ class BuildPlugin implements Plugin<Project> {
/** Configures generation of maven poms. */
public static void configurePomGeneration(Project project) {
project.plugins.withType(MavenBasePublishPlugin.class).whenPluginAdded {
project.plugins.withType(MavenPublishPlugin.class).whenPluginAdded {
project.publishing {
publications {
all { MavenPublication publication -> // we only deal with maven

View File: RestTestsFromSnippetsTask.groovy

@ -184,13 +184,6 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
current.println('---')
current.println("setup:")
body(setup, true)
// always wait for yellow before anything is executed
current.println(
" - do:\n" +
" raw:\n" +
" method: GET\n" +
" path: \"_cluster/health\"\n" +
" wait_for_status: \"yellow\"")
}
private void body(Snippet snippet, boolean inSetup) {

View File: PluginBuildPlugin.groovy

@ -18,14 +18,23 @@
*/
package org.elasticsearch.gradle.plugin
import nebula.plugin.publishing.maven.MavenBasePublishPlugin
import nebula.plugin.publishing.maven.MavenScmPlugin
import org.elasticsearch.gradle.BuildPlugin
import org.elasticsearch.gradle.test.RestIntegTestTask
import org.elasticsearch.gradle.test.RunTask
import org.gradle.api.Project
import org.gradle.api.Task
import org.gradle.api.XmlProvider
import org.gradle.api.publish.maven.MavenPublication
import org.gradle.api.publish.maven.plugins.MavenPublishPlugin
import org.gradle.api.tasks.SourceSet
import org.gradle.api.tasks.bundling.Zip
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.StandardCopyOption
import java.util.regex.Matcher
import java.util.regex.Pattern
/**
* Encapsulates build configuration for an Elasticsearch plugin.
*/
@ -38,19 +47,35 @@ public class PluginBuildPlugin extends BuildPlugin {
// this afterEvaluate must happen before the afterEvaluate added by integTest creation,
// so that the file name resolution for installing the plugin will be set up
project.afterEvaluate {
boolean isModule = project.path.startsWith(':modules:')
String name = project.pluginProperties.extension.name
project.jar.baseName = name
project.bundlePlugin.baseName = name
if (project.pluginProperties.extension.hasClientJar) {
// for plugins which work with the transport client, we copy the jar
// file to a new name, copy the nebula generated pom to the same name,
// and generate a different pom for the zip
project.signArchives.enabled = false
addJarPomGeneration(project)
addClientJarTask(project)
if (isModule == false) {
addZipPomGeneration(project)
}
} else {
// no client plugin, so use the pom file from nebula, without jar, for the zip
project.ext.set("nebulaPublish.maven.jar", false)
}
project.integTest.dependsOn(project.bundlePlugin)
project.tasks.run.dependsOn(project.bundlePlugin)
if (project.path.startsWith(':modules:')) {
if (isModule) {
project.integTest.clusterConfig.module(project)
project.tasks.run.clusterConfig.module(project)
} else {
project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
addPomGeneration(project)
project.integTest.clusterConfig.plugin(project.path)
project.tasks.run.clusterConfig.plugin(project.path)
addZipPomGeneration(project)
}
project.namingConventions {
@ -60,6 +85,7 @@ public class PluginBuildPlugin extends BuildPlugin {
}
createIntegTestTask(project)
createBundleTask(project)
project.configurations.getByName('default').extendsFrom(project.configurations.getByName('runtime'))
project.tasks.create('run', RunTask) // allow running ES with this plugin in the foreground of a build
}
@ -118,40 +144,93 @@ public class PluginBuildPlugin extends BuildPlugin {
}
project.assemble.dependsOn(bundle)
// remove jar from the archives (things that will be published), and set it to the zip
project.configurations.archives.artifacts.removeAll { it.archiveTask.is project.jar }
project.artifacts.add('archives', bundle)
// also make the zip the default artifact (used when depending on this project)
project.configurations.getByName('default').extendsFrom = []
project.artifacts.add('default', bundle)
// also make the zip available as a configuration (used when depending on this project)
project.configurations.create('zip')
project.artifacts.add('zip', bundle)
}
/**
* Adds the plugin jar and zip as publications.
*/
protected static void addPomGeneration(Project project) {
project.plugins.apply(MavenBasePublishPlugin.class)
project.plugins.apply(MavenScmPlugin.class)
/** Adds a task to move jar and associated files to a "-client" name. */
protected static void addClientJarTask(Project project) {
Task clientJar = project.tasks.create('clientJar')
clientJar.dependsOn('generatePomFileForJarPublication', project.jar, project.javadocJar, project.sourcesJar)
clientJar.doFirst {
Path jarFile = project.jar.outputs.files.singleFile.toPath()
String clientFileName = jarFile.fileName.toString().replace(project.version, "client-${project.version}")
Files.copy(jarFile, jarFile.resolveSibling(clientFileName), StandardCopyOption.REPLACE_EXISTING)
String pomFileName = jarFile.fileName.toString().replace('.jar', '.pom')
String clientPomFileName = clientFileName.replace('.jar', '.pom')
Files.copy(jarFile.resolveSibling(pomFileName), jarFile.resolveSibling(clientPomFileName),
StandardCopyOption.REPLACE_EXISTING)
String sourcesFileName = jarFile.fileName.toString().replace('.jar', '-sources.jar')
String clientSourcesFileName = clientFileName.replace('.jar', '-sources.jar')
Files.copy(jarFile.resolveSibling(sourcesFileName), jarFile.resolveSibling(clientSourcesFileName),
StandardCopyOption.REPLACE_EXISTING)
String javadocFileName = jarFile.fileName.toString().replace('.jar', '-javadoc.jar')
String clientJavadocFileName = clientFileName.replace('.jar', '-javadoc.jar')
Files.copy(jarFile.resolveSibling(javadocFileName), jarFile.resolveSibling(clientJavadocFileName),
StandardCopyOption.REPLACE_EXISTING)
}
project.assemble.dependsOn(clientJar)
}
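For context, the renaming above just inserts "client-" before the version in each artifact name; a minimal Java sketch of the rule (plugin name and version are illustrative):

public class ClientJarNameDemo {
    public static void main(String[] args) {
        String version = "5.0.0-alpha5";                 // hypothetical project.version
        String jarName = "percolator-" + version + ".jar";
        // the clientJar task copies the jar (and its pom/sources/javadoc) under this name:
        System.out.println(jarName.replace(version, "client-" + version));
        // prints: percolator-client-5.0.0-alpha5.jar
    }
}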
static final Pattern GIT_PATTERN = Pattern.compile(/git@([^:]+):([^\.]+)\.git/)
/** Derives a repository URL from the git origin. */
protected static String urlFromOrigin(String origin) {
if (origin.startsWith('https')) {
return origin
}
Matcher matcher = GIT_PATTERN.matcher(origin)
if (matcher.matches()) {
return "https://${matcher.group(1)}/${matcher.group(2)}"
} else {
return origin // best effort, the url doesn't really matter, it is just required by maven central
}
}
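To make the mapping concrete, a standalone Java port of urlFromOrigin (the origin string below is illustrative):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UrlFromOriginDemo {
    private static final Pattern GIT_PATTERN = Pattern.compile("git@([^:]+):([^\\.]+)\\.git");

    static String urlFromOrigin(String origin) {
        if (origin.startsWith("https")) {
            return origin;
        }
        Matcher matcher = GIT_PATTERN.matcher(origin);
        // ssh-style origins become browsable https urls; anything else is passed through
        return matcher.matches() ? "https://" + matcher.group(1) + "/" + matcher.group(2) : origin;
    }

    public static void main(String[] args) {
        // prints: https://github.com/elastic/elasticsearch
        System.out.println(urlFromOrigin("git@github.com:elastic/elasticsearch.git"));
    }
}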
/** Adds a nebula publishing task to generate a pom file for the plugin jar. */
protected static void addJarPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)
project.publishing {
publications {
nebula {
artifact project.bundlePlugin
pom.withXml {
// overwrite the name/description in the pom nebula set up
Node root = asNode()
for (Node node : root.children()) {
if (node.name() == 'name') {
node.setValue(project.pluginProperties.extension.name)
} else if (node.name() == 'description') {
node.setValue(project.pluginProperties.extension.description)
}
}
jar(MavenPublication) {
from project.components.java
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}
/** Adds a task to generate a pom file for the zip distribution. */
protected void addZipPomGeneration(Project project) {
project.plugins.apply(MavenPublishPlugin.class)
project.publishing {
publications {
zip(MavenPublication) {
artifact project.bundlePlugin
pom.packaging = 'pom'
pom.withXml { XmlProvider xml ->
Node root = xml.asNode()
root.appendNode('name', project.pluginProperties.extension.name)
root.appendNode('description', project.pluginProperties.extension.description)
root.appendNode('url', urlFromOrigin(project.scminfo.origin))
Node scmNode = root.appendNode('scm')
scmNode.appendNode('url', project.scminfo.origin)
}
}
}
}
}
}

View File: PluginPropertiesExtension.groovy

@ -39,6 +39,10 @@ class PluginPropertiesExtension {
@Input
String classname
/** Indicates whether the plugin jar should be made available for the transport client. */
@Input
boolean hasClientJar = false
PluginPropertiesExtension(Project project) {
name = project.name
version = project.version

View File: ClusterConfiguration.groovy

@ -20,12 +20,15 @@ package org.elasticsearch.gradle.test
import org.gradle.api.GradleException
import org.gradle.api.Project
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
/** Configuration for an elasticsearch cluster, used for integration tests. */
class ClusterConfiguration {
private final Project project
@Input
String distribution = 'integ-test-zip'
@ -77,6 +80,10 @@ class ClusterConfiguration {
return tmpFile.exists()
}
public ClusterConfiguration(Project project) {
this.project = project
}
Map<String, String> systemProperties = new HashMap<>()
Map<String, String> settings = new HashMap<>()
@ -84,7 +91,7 @@ class ClusterConfiguration {
// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()
LinkedHashMap<String, Object> plugins = new LinkedHashMap<>()
LinkedHashMap<String, Project> plugins = new LinkedHashMap<>()
List<Project> modules = new ArrayList<>()
@ -101,13 +108,9 @@ class ClusterConfiguration {
}
@Input
void plugin(String name, FileCollection file) {
plugins.put(name, file)
}
@Input
void plugin(String name, Project pluginProject) {
plugins.put(name, pluginProject)
void plugin(String path) {
Project pluginProject = project.project(path)
plugins.put(pluginProject.name, pluginProject)
}
/** Add a module to the cluster. The project must be an esplugin and have a single zip default artifact. */

View File: ClusterFormationTasks.groovy

@ -167,7 +167,7 @@ class ClusterFormationTasks {
}
// install plugins
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
String actionName = pluginTaskName('install', plugin.getKey(), 'Plugin')
setup = configureInstallPluginTask(taskName(task, node, actionName), project, setup, node, plugin.getValue())
}
@ -326,38 +326,34 @@ class ClusterFormationTasks {
Copy copyPlugins = project.tasks.create(name: name, type: Copy, dependsOn: setup)
List<FileCollection> pluginFiles = []
for (Map.Entry<String, Object> plugin : node.config.plugins.entrySet()) {
FileCollection pluginZip
if (plugin.getValue() instanceof Project) {
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot project ${pluginProject.path} which is not an esplugin")
}
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, pluginProject)
setup.dependsOn(pluginProject.tasks.bundlePlugin)
pluginZip = configuration
for (Map.Entry<String, Project> plugin : node.config.plugins.entrySet()) {
// also allow rest tests to use the rest spec from the plugin
Copy copyRestSpec = null
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec'), type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
} else {
pluginZip = plugin.getValue()
Project pluginProject = plugin.getValue()
if (pluginProject.plugins.hasPlugin(PluginBuildPlugin) == false) {
throw new GradleException("Task ${name} cannot use project ${pluginProject.path} which is not an esplugin")
}
pluginFiles.add(pluginZip)
String configurationName = "_plugin_${pluginProject.path}"
Configuration configuration = project.configurations.findByName(configurationName)
if (configuration == null) {
configuration = project.configurations.create(configurationName)
}
project.dependencies.add(configurationName, project.dependencies.project(path: pluginProject.path, configuration: 'zip'))
setup.dependsOn(pluginProject.tasks.bundlePlugin)
// also allow rest tests to use the rest spec from the plugin
String copyRestSpecTaskName = pluginTaskName('copy', plugin.getKey(), 'PluginRestSpec')
Copy copyRestSpec = project.tasks.findByName(copyRestSpecTaskName)
for (File resourceDir : pluginProject.sourceSets.test.resources.srcDirs) {
File restApiDir = new File(resourceDir, 'rest-api-spec/api')
if (restApiDir.exists() == false) continue
if (copyRestSpec == null) {
copyRestSpec = project.tasks.create(name: copyRestSpecTaskName, type: Copy)
copyPlugins.dependsOn(copyRestSpec)
copyRestSpec.into(project.sourceSets.test.output.resourcesDir)
}
copyRestSpec.from(resourceDir).include('rest-api-spec/api/**')
}
pluginFiles.add(configuration)
}
copyPlugins.into(node.pluginsTmpDir)
@ -379,15 +375,10 @@ class ClusterFormationTasks {
return installModule
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Object plugin) {
FileCollection pluginZip
if (plugin instanceof Project) {
pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
} else {
pluginZip = plugin
}
static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, Project plugin) {
FileCollection pluginZip = project.configurations.getByName("_plugin_${plugin.path}")
// delay reading the file location until execution time by wrapping in a closure within a GString
String file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object file = "${-> new File(node.pluginsTmpDir, pluginZip.singleFile.getName()).toURI().toURL().toString()}"
Object[] args = [new File(node.homeDir, 'bin/elasticsearch-plugin'), 'install', file]
return configureExecTask(name, project, setup, node, args)
}

View File: RestIntegTestTask.groovy

@ -32,7 +32,7 @@ import org.gradle.util.ConfigureUtil
*/
public class RestIntegTestTask extends RandomizedTestingTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration()
ClusterConfiguration clusterConfig
/** Flag indicating whether the rest tests in the rest spec should be run. */
@Input
@ -44,6 +44,7 @@ public class RestIntegTestTask extends RandomizedTestingTask {
dependsOn(project.testClasses)
classpath = project.sourceSets.test.runtimeClasspath
testClassesDir = project.sourceSets.test.output.classesDir
clusterConfig = new ClusterConfiguration(project)
// start with the common test configuration
configure(BuildPlugin.commonTestConfig(project))

View File: RunTask.groovy

@ -7,11 +7,15 @@ import org.gradle.util.ConfigureUtil
public class RunTask extends DefaultTask {
ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)
ClusterConfiguration clusterConfig
public RunTask() {
description = "Runs elasticsearch with '${project.path}'"
group = 'Verification'
clusterConfig = new ClusterConfiguration(project)
clusterConfig.httpPort = 9200
clusterConfig.transportPort = 9300
clusterConfig.daemonize = false
project.afterEvaluate {
ClusterFormationTasks.setup(project, this, clusterConfig)
}

View File: checkstyle_suppressions.xml

@ -555,7 +555,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]BucketsAggregator.java" checks="LineLength" />
@ -575,10 +574,7 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]InternalReverseNested.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]ReverseNestedAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]InternalRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]RangeAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]date[/\\]InternalDateRange.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]range[/\\]ipv4[/\\]InternalIPv4Range.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedBytesHashSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedMapSamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]DiversifiedNumericSamplerAggregator.java" checks="LineLength" />
@ -586,12 +582,8 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]InternalSampler.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]sampler[/\\]SamplerAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]GlobalOrdinalsSignificantTermsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]InternalSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantLongTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantStringTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsAggregatorFactory.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificantTermsParametersParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]UnmappedSignificantTerms.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]GND.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]NXYSignificanceHeuristic.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]heuristics[/\\]PercentageScore.java" checks="LineLength" />
@ -611,7 +603,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]CardinalityAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]cardinality[/\\]HyperLogLogPlusPlus.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]GeoBoundsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]geobounds[/\\]InternalGeoBounds.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]AbstractTDigestPercentilesAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentileRanksAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]percentiles[/\\]tdigest[/\\]TDigestPercentilesAggregator.java" checks="LineLength" />
@ -619,7 +610,6 @@
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]stats[/\\]extended[/\\]ExtendedStatsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]tophits[/\\]TopHitsAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]bucketscript[/\\]BucketScriptPipelineAggregator.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]pipeline[/\\]derivative[/\\]InternalDerivative.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
@ -750,7 +740,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]ShardsAllocatorModuleIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]allocation[/\\]SimpleAllocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterIndexHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]health[/\\]ClusterStateHealthTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]AutoExpandReplicasTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]DateMathExpressionResolverTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]metadata[/\\]HumanReadableIndexSettingsTests.java" checks="LineLength" />
@ -856,7 +845,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReplicaShardAllocatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]ReusePeerRecoverySharedTest.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]get[/\\]GetActionIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]http[/\\]netty[/\\]NettyHttpServerPipeliningTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModuleTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexServiceTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexWithShadowReplicasIT.java" checks="LineLength" />
@ -1047,7 +1035,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsDocCountErrorIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]TermsShardMinDocCountIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]nested[/\\]NestedAggregatorTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]bucket[/\\]significant[/\\]SignificanceHeuristicTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AbstractGeoTestCase.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]AvgIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]metrics[/\\]SumIT.java" checks="LineLength" />

View File: RestClient.java

@ -37,8 +37,6 @@ import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.config.Registry;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
@ -91,7 +89,7 @@ public final class RestClient implements Closeable {
private final ConcurrentMap<HttpHost, DeadHostState> blacklist = new ConcurrentHashMap<>();
private final FailureListener failureListener;
private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders,
HttpHost[] hosts, FailureListener failureListener) {
this.client = client;
this.maxRetryTimeoutMillis = maxRetryTimeoutMillis;
@ -393,10 +391,11 @@ public final class RestClient implements Closeable {
private static final Header[] EMPTY_HEADERS = new Header[0];
private final HttpHost[] hosts;
private CloseableHttpClient httpClient;
private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS;
private Header[] defaultHeaders = EMPTY_HEADERS;
private FailureListener failureListener;
private HttpClientConfigCallback httpClientConfigCallback;
private RequestConfigCallback requestConfigCallback;
/**
* Creates a new builder instance and sets the hosts that the client will send requests to.
@ -408,17 +407,6 @@ public final class RestClient implements Closeable {
this.hosts = hosts;
}
/**
* Sets the http client. A new default one will be created if not
* specified, by calling {@link #createDefaultHttpClient(Registry)})}.
*
* @see CloseableHttpClient
*/
public Builder setHttpClient(CloseableHttpClient httpClient) {
this.httpClient = httpClient;
return this;
}
/**
* Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request.
* {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified.
@ -434,12 +422,10 @@ public final class RestClient implements Closeable {
}
/**
* Sets the default request headers, to be used when creating the default http client instance.
* In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be
* set to it externally during http client construction.
* Sets the default request headers, to be sent with every request unless overridden on a per request basis
*/
public Builder setDefaultHeaders(Header[] defaultHeaders) {
Objects.requireNonNull(defaultHeaders, "default headers must not be null");
Objects.requireNonNull(defaultHeaders, "defaultHeaders must not be null");
for (Header defaultHeader : defaultHeaders) {
Objects.requireNonNull(defaultHeader, "default header must not be null");
}
@ -451,48 +437,94 @@ public final class RestClient implements Closeable {
* Sets the {@link FailureListener} to be notified for each request failure
*/
public Builder setFailureListener(FailureListener failureListener) {
Objects.requireNonNull(failureListener, "failure listener must not be null");
Objects.requireNonNull(failureListener, "failureListener must not be null");
this.failureListener = failureListener;
return this;
}
/**
* Sets the {@link HttpClientConfigCallback} to be used to customize http client configuration
*/
public Builder setHttpClientConfigCallback(HttpClientConfigCallback httpClientConfigCallback) {
Objects.requireNonNull(httpClientConfigCallback, "httpClientConfigCallback must not be null");
this.httpClientConfigCallback = httpClientConfigCallback;
return this;
}
/**
* Sets the {@link RequestConfigCallback} to be used to customize the default request configuration
*/
public Builder setRequestConfigCallback(RequestConfigCallback requestConfigCallback) {
Objects.requireNonNull(requestConfigCallback, "requestConfigCallback must not be null");
this.requestConfigCallback = requestConfigCallback;
return this;
}
/**
* Creates a new {@link RestClient} based on the provided configuration.
*/
public RestClient build() {
if (httpClient == null) {
httpClient = createDefaultHttpClient(null);
}
if (failureListener == null) {
failureListener = new FailureListener();
}
CloseableHttpClient httpClient = createHttpClient();
return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener);
}
/**
* Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided.
*
* @see CloseableHttpClient
*/
public static CloseableHttpClient createDefaultHttpClient(Registry<ConnectionSocketFactory> socketFactoryRegistry) {
PoolingHttpClientConnectionManager connectionManager;
if (socketFactoryRegistry == null) {
connectionManager = new PoolingHttpClientConnectionManager();
} else {
connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
private CloseableHttpClient createHttpClient() {
//default timeouts are all infinite
RequestConfig.Builder requestConfigBuilder = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS);
if (requestConfigCallback != null) {
requestConfigCallback.customizeRequestConfig(requestConfigBuilder);
}
RequestConfig requestConfig = requestConfigBuilder.build();
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager();
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);
//default timeouts are all infinite
RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS)
.setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS)
.setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS).build();
return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build();
HttpClientBuilder httpClientBuilder = HttpClientBuilder.create().setConnectionManager(connectionManager)
.setDefaultRequestConfig(requestConfig);
if (httpClientConfigCallback != null) {
httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
}
return httpClientBuilder.build();
}
}
/**
* Callback used to customize the default {@link RequestConfig} being set to the {@link CloseableHttpClient}
* @see HttpClientBuilder#setDefaultRequestConfig
*/
public interface RequestConfigCallback {
/**
* Allows customizing the {@link RequestConfig} that will be used with each request.
* It is common to customize the different timeout values through this method without losing any other useful default
* value that the {@link RestClient.Builder} internally sets.
*/
void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder);
}
/**
* Callback used to customize the {@link CloseableHttpClient} instance used by a {@link RestClient} instance.
* Allows customizing the default {@link RequestConfig} being set to the client and any parameter that
* can be set through {@link HttpClientBuilder}
*/
public interface HttpClientConfigCallback {
/**
* Allows customizing the {@link CloseableHttpClient} being created and used by the {@link RestClient}.
* It is common to customize the default {@link org.apache.http.client.CredentialsProvider} through this method,
* without losing any other useful default value that the {@link RestClient.Builder} internally sets.
* Also useful to set up ssl through {@link SSLSocketFactoryHttpConfigCallback}.
*/
void customizeHttpClient(HttpClientBuilder httpClientBuilder);
}
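Taken together, the two callbacks replace the removed setHttpClient; a hedged sketch of building a client with them (host, header, credentials, and timeout values are all illustrative; BasicCredentialsProvider, AuthScope, and BasicHeader come from Apache HttpClient):

BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "password"));
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
        .setDefaultHeaders(new Header[]{new BasicHeader("X-Demo", "true")})
        // adjust only the timeouts; the builder's other request defaults are preserved
        .setRequestConfigCallback(requestConfigBuilder ->
                requestConfigBuilder.setConnectTimeout(5000).setSocketTimeout(60000))
        // customize the Apache client itself, here its default credentials provider
        .setHttpClientConfigCallback(httpClientBuilder ->
                httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider))
        .build();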
/**
* Listener that gets notified whenever a failure happens. Useful when sniffing is enabled, so that we can sniff on failure.
* The default implementation is a no-op.

View File: SSLSocketFactoryHttpConfigCallback.java (new file)

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.client;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
/**
* Helps configure the http client when it needs to communicate over ssl. It effectively replaces the connection manager
* with one that has ssl properly configured thanks to the provided {@link SSLConnectionSocketFactory}.
*/
public class SSLSocketFactoryHttpConfigCallback implements RestClient.HttpClientConfigCallback {
private final SSLConnectionSocketFactory sslSocketFactory;
public SSLSocketFactoryHttpConfigCallback(SSLConnectionSocketFactory sslSocketFactory) {
this.sslSocketFactory = sslSocketFactory;
}
@Override
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder.<ConnectionSocketFactory>create()
.register("http", PlainConnectionSocketFactory.getSocketFactory())
.register("https", sslSocketFactory).build();
PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
//default settings may be too constraining
connectionManager.setDefaultMaxPerRoute(10);
connectionManager.setMaxTotal(30);
httpClientBuilder.setConnectionManager(connectionManager);
}
}
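A hedged wiring sketch for the class above; the JDK default SSLContext stands in for a real keystore-backed one:

SSLContext sslContext = SSLContext.getDefault();    // throws NoSuchAlgorithmException
SSLConnectionSocketFactory sslSocketFactory = new SSLConnectionSocketFactory(sslContext);
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "https"))
        .setHttpClientConfigCallback(new SSLSocketFactoryHttpConfigCallback(sslSocketFactory))
        .build();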

View File: RestClientBuilderTests.java

@ -22,6 +22,7 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.message.BasicHeader;
@ -67,7 +68,7 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("default headers must not be null", e.getMessage());
assertEquals("defaultHeaders must not be null", e.getMessage());
}
try {
@ -81,7 +82,21 @@ public class RestClientBuilderTests extends RestClientTestCase {
RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("failure listener must not be null", e.getMessage());
assertEquals("failureListener must not be null", e.getMessage());
}
try {
RestClient.builder(new HttpHost("localhost", 9200)).setHttpClientConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("httpClientConfigCallback must not be null", e.getMessage());
}
try {
RestClient.builder(new HttpHost("localhost", 9200)).setRequestConfigCallback(null);
fail("should have failed");
} catch(NullPointerException e) {
assertEquals("requestConfigCallback must not be null", e.getMessage());
}
int numNodes = RandomInts.randomIntBetween(getRandom(), 1, 5);
@ -91,7 +106,18 @@ public class RestClientBuilderTests extends RestClientTestCase {
}
RestClient.Builder builder = RestClient.builder(hosts);
if (getRandom().nextBoolean()) {
builder.setHttpClient(HttpClientBuilder.create().build());
builder.setHttpClientConfigCallback(new RestClient.HttpClientConfigCallback() {
@Override
public void customizeHttpClient(HttpClientBuilder httpClientBuilder) {
}
});
}
if (getRandom().nextBoolean()) {
builder.setRequestConfigCallback(new RestClient.RequestConfigCallback() {
@Override
public void customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
}
});
}
if (getRandom().nextBoolean()) {
int numHeaders = RandomInts.randomIntBetween(getRandom(), 1, 5);

View File: RestClientMultipleHostsTests.java

@ -20,6 +20,7 @@
package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomInts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.ProtocolVersion;
@ -91,7 +92,7 @@ public class RestClientMultipleHostsTests extends RestClientTestCase {
httpHosts[i] = new HttpHost("localhost", 9200 + i);
}
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build();
restClient = new RestClient(httpClient, 10000, new Header[0], httpHosts, failureListener);
}
public void testRoundRobinOkStatusCodes() throws Exception {

View File: RestClientSingleHostTests.java

@ -128,8 +128,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
}
httpHost = new HttpHost("localhost", 9200);
failureListener = new TrackingFailureListener();
restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders)
.setFailureListener(failureListener).build();
restClient = new RestClient(httpClient, 10000, defaultHeaders, new HttpHost[]{httpHost}, failureListener);
}
/**

View File: build.gradle (new transport client project)

@ -0,0 +1,53 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build'
group = 'org.elasticsearch.client'
dependencies {
compile "org.elasticsearch:elasticsearch:${version}"
compile project(path: ':modules:transport-netty3', configuration: 'runtime')
compile project(path: ':modules:reindex', configuration: 'runtime')
compile project(path: ':modules:lang-mustache', configuration: 'runtime')
compile project(path: ':modules:percolator', configuration: 'runtime')
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
testCompile "junit:junit:${versions.junit}"
}
dependencyLicenses {
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false
}
}
forbiddenApisTest {
// we don't use the core test-framework; no lucene classes are present, so we don't want the es-test-signatures to
// be pulled in
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
PrecommitTasks.getResource('/forbidden/es-all-signatures.txt')]
}
namingConventions {
testClass = 'com.carrotsearch.randomizedtesting.RandomizedTest'
//we don't have integration tests
skipIntegTestInDisguise = true
}

View File: PreBuiltTransportClient.java (new file)

@ -0,0 +1,63 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
/**
* A {@link TransportClient} that pre-installs the {@link Netty3Plugin}, {@link ReindexPlugin}, {@link PercolatorPlugin},
* and {@link MustachePlugin} plugins for the client. These plugins are all elasticsearch core modules that the
* client requires.
*/
@SuppressWarnings({"unchecked","varargs"})
public class PreBuiltTransportClient extends TransportClient {
private static final Collection<Class<? extends Plugin>> PRE_INSTALLED_PLUGINS = Collections.unmodifiableList(Arrays.asList(
TransportPlugin.class, ReindexPlugin.class, PercolatorPlugin.class, MustachePlugin.class));
@SafeVarargs
public PreBuiltTransportClient(Settings settings, Class<? extends Plugin>... plugins) {
this(settings, Arrays.asList(plugins));
}
public PreBuiltTransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
super(settings, Settings.EMPTY, addPlugins(plugins, PRE_INSTALLED_PLUGINS));
}
/**
* The default transport implementation for the transport client.
*/
public static final class TransportPlugin extends Netty3Plugin {
// disable assertions for permissions since we might not have the permissions here
// compared to when we are loaded as a real module into the es server
public TransportPlugin(Settings settings) {
super(Settings.builder().put("netty.assert.buglevel", false).put(settings).build());
}
}
}
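A hedged usage sketch (address and port are illustrative; addTransportAddress is inherited from TransportClient, and InetAddress.getByName throws UnknownHostException):

try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)
        .addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300))) {
    // netty3 transport, reindex, percolator and mustache are already registered here
}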

View File: PreBuiltTransportClientTests.java (new file)

@ -0,0 +1,60 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.client;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.ReindexPlugin;
import org.elasticsearch.percolator.PercolatorPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.mustache.MustachePlugin;
import org.elasticsearch.transport.Netty3Plugin;
import org.junit.Test;
import java.util.Arrays;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
public class PreBuiltTransportClientTests extends RandomizedTest {
@Test
public void testPluginInstalled() {
try (TransportClient client = new PreBuiltTransportClient(Settings.EMPTY)) {
Settings settings = client.settings();
assertEquals(Netty3Plugin.NETTY_TRANSPORT_NAME, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
}
}
@Test
public void testInstallPluginTwice() {
for (Class<? extends Plugin> plugin : Arrays.asList(ReindexPlugin.class, PercolatorPlugin.class,
MustachePlugin.class)) {
try {
new PreBuiltTransportClient(Settings.EMPTY, plugin);
fail("exception expected");
} catch (IllegalArgumentException ex) {
assertEquals("plugin is already installed", ex.getMessage());
}
}
}
}

View File: core/build.gradle

@ -74,8 +74,6 @@ dependencies {
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml:${versions.jackson}"
compile "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}"
// network stack
compile 'io.netty:netty:3.10.6.Final'
// percentiles aggregation
compile 'com.tdunning:t-digest:3.0'
// percentile ranks aggregation
@ -152,26 +150,11 @@ processResources {
}
thirdPartyAudit.excludes = [
// uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
// classes are missing!
// from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml)
'com.fasterxml.jackson.databind.ObjectMapper',
// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32FrameDecoder (netty)
'com.google.protobuf.CodedInputStream',
// from org.jboss.netty.handler.codec.protobuf.ProtobufVarint32LengthFieldPrepender (netty)
'com.google.protobuf.CodedOutputStream',
// from org.jboss.netty.handler.codec.protobuf.ProtobufDecoder (netty)
'com.google.protobuf.ExtensionRegistry',
'com.google.protobuf.MessageLite$Builder',
'com.google.protobuf.MessageLite',
'com.google.protobuf.Parser',
// from org.apache.log4j.receivers.net.JMSReceiver (log4j-extras)
'javax.jms.Message',
'javax.jms.MessageListener',
@ -196,72 +179,8 @@ thirdPartyAudit.excludes = [
'javax.mail.internet.MimeMessage',
'javax.mail.internet.MimeMultipart',
'javax.mail.internet.MimeUtility',
// from org.jboss.netty.channel.socket.http.HttpTunnelingServlet (netty)
'javax.servlet.ServletConfig',
'javax.servlet.ServletException',
'javax.servlet.ServletOutputStream',
'javax.servlet.http.HttpServlet',
'javax.servlet.http.HttpServletRequest',
'javax.servlet.http.HttpServletResponse',
// from org.jboss.netty.logging.CommonsLoggerFactory (netty)
'org.apache.commons.logging.Log',
'org.apache.commons.logging.LogFactory',
// from org.jboss.netty.handler.ssl.OpenSslEngine (netty)
'org.apache.tomcat.jni.Buffer',
'org.apache.tomcat.jni.Library',
'org.apache.tomcat.jni.Pool',
'org.apache.tomcat.jni.SSL',
'org.apache.tomcat.jni.SSLContext',
// from org.jboss.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty)
'org.bouncycastle.asn1.x500.X500Name',
'org.bouncycastle.cert.X509v3CertificateBuilder',
'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter',
'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder',
'org.bouncycastle.jce.provider.BouncyCastleProvider',
'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder',
// from org.jboss.netty.handler.ssl.JettyNpnSslEngine (netty)
'org.eclipse.jetty.npn.NextProtoNego$ClientProvider',
'org.eclipse.jetty.npn.NextProtoNego$ServerProvider',
'org.eclipse.jetty.npn.NextProtoNego',
// from org.jboss.netty.logging.JBossLoggerFactory (netty)
'org.jboss.logging.Logger',
// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteInput (netty)
'org.jboss.marshalling.ByteInput',
// from org.jboss.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty)
'org.jboss.marshalling.ByteOutput',
// from org.jboss.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty)
'org.jboss.marshalling.Marshaller',
// from org.jboss.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty)
'org.jboss.marshalling.MarshallerFactory',
'org.jboss.marshalling.MarshallingConfiguration',
'org.jboss.marshalling.Unmarshaller',
// from org.locationtech.spatial4j.io.GeoJSONReader (spatial4j)
'org.noggit.JSONParser',
// from org.jboss.netty.container.osgi.NettyBundleActivator (netty)
'org.osgi.framework.BundleActivator',
'org.osgi.framework.BundleContext',
// from org.jboss.netty.logging.OsgiLoggerFactory$1 (netty)
'org.osgi.framework.ServiceReference',
'org.osgi.service.log.LogService',
'org.osgi.util.tracker.ServiceTracker',
'org.osgi.util.tracker.ServiceTrackerCustomizer',
// from org.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional
'org.slf4j.Logger',
'org.slf4j.LoggerFactory',
]
// dependency licenses are currently checked in distribution

View File: CustomFieldQuery.java

@ -27,6 +27,7 @@ import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.join.ToParentBlockJoinQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
@ -71,6 +72,9 @@ public class CustomFieldQuery extends FieldQuery {
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
} else if (sourceQuery instanceof ToParentBlockJoinQuery) {
ToParentBlockJoinQuery blockJoinQuery = (ToParentBlockJoinQuery) sourceQuery;
flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
} else {
super.flatten(sourceQuery, reader, flatQueries, boost);
}

View File: ElasticsearchException.java

@ -658,8 +658,7 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.search.aggregations.InvalidAggregationPathException::new, 121),
INDEX_ALREADY_EXISTS_EXCEPTION(org.elasticsearch.indices.IndexAlreadyExistsException.class,
org.elasticsearch.indices.IndexAlreadyExistsException::new, 123),
SCRIPT_PARSE_EXCEPTION(org.elasticsearch.script.Script.ScriptParseException.class,
org.elasticsearch.script.Script.ScriptParseException::new, 124),
// 124 used to be Script.ScriptParseException
HTTP_ON_TRANSPORT_EXCEPTION(TcpTransport.HttpOnTransportException.class,
TcpTransport.HttpOnTransportException::new, 125),
MAPPER_PARSING_EXCEPTION(org.elasticsearch.index.mapper.MapperParsingException.class,
@ -692,7 +691,8 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
org.elasticsearch.index.query.QueryShardException::new, 141),
NO_LONGER_PRIMARY_SHARD_EXCEPTION(ShardStateAction.NoLongerPrimaryShardException.class,
ShardStateAction.NoLongerPrimaryShardException::new, 142),
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143);
SCRIPT_EXCEPTION(org.elasticsearch.script.ScriptException.class, org.elasticsearch.script.ScriptException::new, 143),
NOT_MASTER_EXCEPTION(org.elasticsearch.cluster.NotMasterException.class, org.elasticsearch.cluster.NotMasterException::new, 144);
final Class<? extends ElasticsearchException> exceptionClass;

View File: ActionListener.java

@ -19,6 +19,8 @@
package org.elasticsearch.action;
import java.util.function.Consumer;
/**
* A listener for action responses or failures.
*/
@ -33,4 +35,31 @@ public interface ActionListener<Response> {
* A failure caused by an exception at some phase of the task.
*/
void onFailure(Exception e);
/**
* Creates a listener that listens for a response (or failure) and executes the
* corresponding consumer when the response (or failure) is received.
*
* @param onResponse the consumer of the response, when the listener receives one
* @param onFailure the consumer of the failure, when the listener receives one
* @param <Response> the type of the response
* @return a listener that listens for responses and invokes the consumer when received
*/
static <Response> ActionListener<Response> wrap(Consumer<Response> onResponse, Consumer<Exception> onFailure) {
return new ActionListener<Response>() {
@Override
public void onResponse(Response response) {
try {
onResponse.accept(response);
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Exception e) {
onFailure.accept(e);
}
};
}
}
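A short usage sketch of the new helper; the String response type is illustrative:

ActionListener<String> listener = ActionListener.wrap(
        response -> System.out.println("handled: " + response),   // onResponse consumer
        e -> System.err.println("failed: " + e));                 // onFailure consumer
listener.onResponse("ok");    // prints: handled: ok
// an exception thrown by the response consumer is rerouted to onFailure by wrap()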

View File: ActionModule.java

@ -20,10 +20,12 @@
package org.elasticsearch.action;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
@ -335,7 +337,8 @@ public class ActionModule extends AbstractModule {
actionFilters = setupActionFilters(actionPlugins, ingestEnabled);
autoCreateIndex = transportClient ? null : new AutoCreateIndex(settings, resolver);
destructiveOperations = new DestructiveOperations(settings, clusterSettings);
restController = new RestController(settings);
Set<String> headers = actionPlugins.stream().flatMap(p -> p.getRestHeaders().stream()).collect(Collectors.toSet());
restController = new RestController(settings, headers);
}
public Map<String, ActionHandler<?, ?>> getActions() {

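For context on where those headers come from: each ActionPlugin can now contribute header names to be preserved by the RestController. A minimal sketch of a contributing plugin, assuming the ActionPlugin#getRestHeaders hook consumed by the stream above; the class and header name are illustrative.
public class MyHeadersPlugin extends Plugin implements ActionPlugin {
    @Override
    public Collection<String> getRestHeaders() {
        return Collections.singleton("X-My-Header");  // hypothetical header name, collected by the stream above
    }
}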
View File

@ -52,6 +52,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
private Integer shard;
private Boolean primary;
private boolean includeYesDecisions = false;
private boolean includeDiskInfo = false;
/** Explain the first unassigned shard */
public ClusterAllocationExplainRequest() {
@ -134,6 +135,16 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
return this.includeYesDecisions;
}
/** {@code true} to include information about the gathered disk information of nodes in the cluster */
public void includeDiskInfo(boolean includeDiskInfo) {
this.includeDiskInfo = includeDiskInfo;
}
/** Returns true if information about disk usage and shard sizes should also be returned */
public boolean includeDiskInfo() {
return this.includeDiskInfo;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder("ClusterAllocationExplainRequest[");
@ -164,6 +175,7 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
this.shard = in.readOptionalVInt();
this.primary = in.readOptionalBoolean();
this.includeYesDecisions = in.readBoolean();
this.includeDiskInfo = in.readBoolean();
}
@Override
@ -173,5 +185,6 @@ public class ClusterAllocationExplainRequest extends MasterNodeRequest<ClusterAl
out.writeOptionalVInt(shard);
out.writeOptionalBoolean(primary);
out.writeBoolean(includeYesDecisions);
out.writeBoolean(includeDiskInfo);
}
}
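A short sketch of driving the new flag from a client, assuming the usual admin-client entry point and the builder shown in the next file; the index name and shard id are illustrative.
ClusterAllocationExplainResponse response = client.admin().cluster().prepareAllocationExplain()
    .setIndex("my-index").setShard(0).setPrimary(true)
    .setIncludeDiskInfo(true)  // new: attach the gathered ClusterInfo to the explanation
    .get();
ClusterInfo clusterInfo = response.getExplanation().getClusterInfo();  // null unless requested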

View File

@ -53,6 +53,18 @@ public class ClusterAllocationExplainRequestBuilder
return this;
}
/** Whether to include "YES" decider decisions in the response instead of only "NO" decisions */
public ClusterAllocationExplainRequestBuilder setIncludeYesDecisions(boolean includeYesDecisions) {
request.includeYesDecisions(includeYesDecisions);
return this;
}
/** Whether to include information about the gathered disk information of nodes in the cluster */
public ClusterAllocationExplainRequestBuilder setIncludeDiskInfo(boolean includeDiskInfo) {
request.includeDiskInfo(includeDiskInfo);
return this;
}
/**
* Signal that the first unassigned shard should be used
*/

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.cluster.allocation;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;
@ -48,10 +49,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
private final long allocationDelayMillis;
private final long remainingDelayMillis;
private final Map<DiscoveryNode, NodeExplanation> nodeExplanations;
private final ClusterInfo clusterInfo;
public ClusterAllocationExplanation(ShardId shard, boolean primary, @Nullable String assignedNodeId, long allocationDelayMillis,
long remainingDelayMillis, @Nullable UnassignedInfo unassignedInfo, boolean hasPendingAsyncFetch,
Map<DiscoveryNode, NodeExplanation> nodeExplanations) {
Map<DiscoveryNode, NodeExplanation> nodeExplanations, @Nullable ClusterInfo clusterInfo) {
this.shard = shard;
this.primary = primary;
this.hasPendingAsyncFetch = hasPendingAsyncFetch;
@ -60,6 +62,7 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
this.allocationDelayMillis = allocationDelayMillis;
this.remainingDelayMillis = remainingDelayMillis;
this.nodeExplanations = nodeExplanations;
this.clusterInfo = clusterInfo;
}
public ClusterAllocationExplanation(StreamInput in) throws IOException {
@ -78,6 +81,11 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
nodeToExplanation.put(nodeExplanation.getNode(), nodeExplanation);
}
this.nodeExplanations = nodeToExplanation;
if (in.readBoolean()) {
this.clusterInfo = new ClusterInfo(in);
} else {
this.clusterInfo = null;
}
}
@Override
@ -94,6 +102,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
for (NodeExplanation explanation : this.nodeExplanations.values()) {
explanation.writeTo(out);
}
if (this.clusterInfo != null) {
out.writeBoolean(true);
this.clusterInfo.writeTo(out);
} else {
out.writeBoolean(false);
}
}
/** Return the shard that the explanation is about */
@ -143,6 +157,12 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
return this.nodeExplanations;
}
/** Return the cluster disk info for the cluster or null if none available */
@Nullable
public ClusterInfo getClusterInfo() {
return this.clusterInfo;
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); {
builder.startObject("shard"); {
@ -164,11 +184,18 @@ public final class ClusterAllocationExplanation implements ToXContent, Writeable
builder.timeValueField("allocation_delay_in_millis", "allocation_delay", TimeValue.timeValueMillis(allocationDelayMillis));
builder.timeValueField("remaining_delay_in_millis", "remaining_delay", TimeValue.timeValueMillis(remainingDelayMillis));
}
builder.startObject("nodes");
for (NodeExplanation explanation : nodeExplanations.values()) {
explanation.toXContent(builder, params);
builder.startObject("nodes"); {
for (NodeExplanation explanation : nodeExplanations.values()) {
explanation.toXContent(builder, params);
}
}
builder.endObject(); // end nodes
if (this.clusterInfo != null) {
builder.startObject("cluster_info"); {
this.clusterInfo.toXContent(builder, params);
}
builder.endObject(); // end "cluster_info"
}
}
builder.endObject(); // end wrapping object
return builder;

View File

@ -28,6 +28,7 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -219,7 +220,7 @@ public class TransportClusterAllocationExplainAction
public static ClusterAllocationExplanation explainShard(ShardRouting shard, RoutingAllocation allocation, RoutingNodes routingNodes,
boolean includeYesDecisions, ShardsAllocator shardAllocator,
List<IndicesShardStoresResponse.StoreStatus> shardStores,
GatewayAllocator gatewayAllocator) {
GatewayAllocator gatewayAllocator, ClusterInfo clusterInfo) {
// don't short circuit deciders, we want a full explanation
allocation.debugDecision(true);
// get the existing unassigned info if available
@ -262,16 +263,17 @@ public class TransportClusterAllocationExplainAction
explanations.put(node, nodeExplanation);
}
return new ClusterAllocationExplanation(shard.shardId(), shard.primary(),
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations);
shard.currentNodeId(), allocationDelayMillis, remainingDelayMillis, ui,
gatewayAllocator.hasFetchPending(shard.shardId(), shard.primary()), explanations, clusterInfo);
}
@Override
protected void masterOperation(final ClusterAllocationExplainRequest request, final ClusterState state,
final ActionListener<ClusterAllocationExplainResponse> listener) {
final RoutingNodes routingNodes = state.getRoutingNodes();
final ClusterInfo clusterInfo = clusterInfoService.getClusterInfo();
final RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, state,
clusterInfoService.getClusterInfo(), System.nanoTime(), false);
clusterInfo, System.nanoTime(), false);
ShardRouting foundShard = null;
if (request.useAnyUnassignedShard()) {
@ -318,7 +320,8 @@ public class TransportClusterAllocationExplainAction
shardStoreResponse.getStoreStatuses().get(shardRouting.getIndexName());
List<IndicesShardStoresResponse.StoreStatus> shardStoreStatus = shardStatuses.get(shardRouting.id());
ClusterAllocationExplanation cae = explainShard(shardRouting, allocation, routingNodes,
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator);
request.includeYesDecisions(), shardAllocator, shardStoreStatus, gatewayAllocator,
request.includeDiskInfo() ? clusterInfo : null);
listener.onResponse(new ClusterAllocationExplainResponse(cae));
}

View File

@ -128,6 +128,7 @@ public class ListTasksResponse extends BaseTasksResponse implements ToXContent {
* {@code group_by=nodes}.
*/
public void setDiscoveryNodes(DiscoveryNodes discoveryNodes) {
// TODO: why isn't this set by default?
this.discoveryNodes = discoveryNodes;
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -55,6 +56,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
private final Set<ClusterBlock> blocks = new HashSet<>();
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, boolean updateAllTypes) {
this.originalMessage = originalMessage;
@ -98,6 +101,11 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
return this;
}
public CreateIndexClusterStateUpdateRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
public TransportMessage originalMessage() {
return originalMessage;
}
@ -142,4 +150,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ
public boolean updateAllTypes() {
return updateAllTypes;
}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -77,6 +78,8 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
private boolean updateAllTypes = false;
private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;
public CreateIndexRequest() {
}
@ -440,6 +443,30 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
return this;
}
public ActiveShardCount waitForActiveShards() {
return waitForActiveShards;
}
/**
* Sets the number of shard copies that should be active for index creation to return.
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
* (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
* wait for all shards (primary and all replicas) to be active before returning.
* Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public CreateIndexRequest waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.waitForActiveShards = waitForActiveShards;
return this;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -462,6 +489,7 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
aliases.add(Alias.read(in));
}
updateAllTypes = in.readBoolean();
waitForActiveShards = ActiveShardCount.readFrom(in);
}
@Override
@ -486,5 +514,6 @@ public class CreateIndexRequest extends AcknowledgedRequest<CreateIndexRequest>
alias.writeTo(out);
}
out.writeBoolean(updateAllTypes);
waitForActiveShards.writeTo(out);
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -249,4 +250,23 @@ public class CreateIndexRequestBuilder extends AcknowledgedRequestBuilder<Create
request.updateAllTypes(updateAllTypes);
return this;
}
/**
* Sets the number of shard copies that should be active for index creation to return.
* Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
* (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
* wait for all shards (primary and all replicas) to be active before returning.
* Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link CreateIndexResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public CreateIndexRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
request.waitForActiveShards(waitForActiveShards);
return this;
}
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.action.admin.indices.create;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
@ -30,22 +31,41 @@ import java.io.IOException;
*/
public class CreateIndexResponse extends AcknowledgedResponse {
private boolean shardsAcked;
protected CreateIndexResponse() {
}
protected CreateIndexResponse(boolean acknowledged) {
protected CreateIndexResponse(boolean acknowledged, boolean shardsAcked) {
super(acknowledged);
assert acknowledged || shardsAcked == false; // if it's not acknowledged, then shards acked should be false too
this.shardsAcked = shardsAcked;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
readAcknowledged(in);
shardsAcked = in.readBoolean();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
writeAcknowledged(out);
out.writeBoolean(shardsAcked);
}
/**
* Returns true if the requisite number of shards were started before
* returning from the index creation operation. If {@link #isAcknowledged()}
* is false, then this also returns false.
*/
public boolean isShardsAcked() {
return shardsAcked;
}
public void addCustomFields(XContentBuilder builder) throws IOException {
builder.field("shards_acknowledged", isShardsAcked());
}
}
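Putting the pieces together, a minimal sketch of a create-index call that waits on shard copies and then inspects the new flag; the client entry point is assumed.
CreateIndexResponse resp = client.admin().indices().prepareCreate("my-index")
    .setWaitForActiveShards(ActiveShardCount.ALL)  // wait for the primary and every replica
    .get();
if (resp.isAcknowledged() && resp.isShardsAcked() == false) {
    // the index was created, but the requested shard copies did not all start before the timeout
}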

View File

@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@ -31,7 +30,6 @@ import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -77,24 +75,12 @@ public class TransportCreateIndexAction extends TransportMasterNodeAction<Create
final CreateIndexClusterStateUpdateRequest updateRequest = new CreateIndexClusterStateUpdateRequest(request, cause, indexName, request.updateAllTypes())
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.settings(request.settings()).mappings(request.mappings())
.aliases(request.aliases()).customs(request.customs());
.aliases(request.aliases()).customs(request.customs())
.waitForActiveShards(request.waitForActiveShards());
createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new CreateIndexResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Exception t) {
if (t instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", t, request.index());
} else {
logger.debug("[{}] failed to create", t, request.index());
}
listener.onFailure(t);
}
});
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
listener.onResponse(new CreateIndexResponse(response.isAcknowledged(), response.isShardsAcked())),
listener::onFailure));
}
}

View File

@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;
@ -206,4 +207,22 @@ public class RolloverRequest extends AcknowledgedRequest<RolloverRequest> implem
}
}
/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.createIndexRequest.waitForActiveShards(waitForActiveShards);
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.rollover;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
@ -70,4 +71,23 @@ public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder<Ro
this.request.getCreateIndexRequest().mapping(type, source);
return this;
}
/**
* Sets the number of shard copies that should be active for creation of the
* new rollover index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link RolloverResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public RolloverRequestBuilder waitForActiveShards(ActiveShardCount waitForActiveShards) {
this.request.setWaitForActiveShards(waitForActiveShards);
return this;
}
}
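A sketch of the rollover variant, assuming a prepareRolloverIndex entry point on the indices admin client; the alias name and shard count are illustrative.
RolloverResponse resp = client.admin().indices().prepareRolloverIndex("logs-write")
    .waitForActiveShards(ActiveShardCount.from(2))  // primary plus one replica per shard
    .get();
// resp.isRolledOver(), resp.isAcknowledged() and resp.isShardsAcked() report the outcome,
// per the response class that follows.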

View File

@ -39,22 +39,28 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
private static final String DRY_RUN = "dry_run";
private static final String ROLLED_OVER = "rolled_over";
private static final String CONDITIONS = "conditions";
private static final String ACKNOWLEDGED = "acknowledged";
private static final String SHARDS_ACKED = "shards_acknowledged";
private String oldIndex;
private String newIndex;
private Set<Map.Entry<String, Boolean>> conditionStatus;
private boolean dryRun;
private boolean rolledOver;
private boolean acknowledged;
private boolean shardsAcked;
RolloverResponse() {
}
RolloverResponse(String oldIndex, String newIndex, Set<Condition.Result> conditionResults,
boolean dryRun, boolean rolledOver) {
boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcked) {
this.oldIndex = oldIndex;
this.newIndex = newIndex;
this.dryRun = dryRun;
this.rolledOver = rolledOver;
this.acknowledged = acknowledged;
this.shardsAcked = shardsAcked;
this.conditionStatus = conditionResults.stream()
.map(result -> new AbstractMap.SimpleEntry<>(result.condition.toString(), result.matched))
.collect(Collectors.toSet());
@ -89,12 +95,31 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
}
/**
* Returns if the rollover was not simulated and the conditions were met
* Returns true if the rollover was not simulated and the conditions were met
*/
public boolean isRolledOver() {
return rolledOver;
}
/**
* Returns true if the creation of the new rollover index and switching of the
* alias to the newly created index was successful, and returns false otherwise.
* If {@link #isDryRun()} is true, then this will also return false. If this
* returns false, then {@link #isShardsAcked()} will also return false.
*/
public boolean isAcknowledged() {
return acknowledged;
}
/**
* Returns true if the requisite number of shards were started in the newly
* created rollover index before returning. If {@link #isAcknowledged()} is
* false, then this will also return false.
*/
public boolean isShardsAcked() {
return shardsAcked;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -110,6 +135,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
conditionStatus = conditions;
dryRun = in.readBoolean();
rolledOver = in.readBoolean();
acknowledged = in.readBoolean();
shardsAcked = in.readBoolean();
}
@Override
@ -124,6 +151,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
}
out.writeBoolean(dryRun);
out.writeBoolean(rolledOver);
out.writeBoolean(acknowledged);
out.writeBoolean(shardsAcked);
}
@Override
@ -132,6 +161,8 @@ public final class RolloverResponse extends ActionResponse implements ToXContent
builder.field(NEW_INDEX, newIndex);
builder.field(ROLLED_OVER, rolledOver);
builder.field(DRY_RUN, dryRun);
builder.field(ACKNOWLEDGED, acknowledged);
builder.field(SHARDS_ACKED, shardsAcked);
builder.startObject(CONDITIONS);
for (Map.Entry<String, Boolean> entry : conditionStatus) {
builder.field(entry.getKey(), entry.getValue());

View File

@ -25,11 +25,12 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpda
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.ActiveShardsObserver;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.AliasAction;
@ -58,6 +59,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
private static final Pattern INDEX_NAME_PATTERN = Pattern.compile("^.*-(\\d)+$");
private final MetaDataCreateIndexService createIndexService;
private final MetaDataIndexAliasesService indexAliasesService;
private final ActiveShardsObserver activeShardsObserver;
private final Client client;
@Inject
@ -70,6 +72,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
this.createIndexService = createIndexService;
this.indexAliasesService = indexAliasesService;
this.client = client;
this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
}
@Override
@ -110,42 +113,34 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
: generateRolloverIndexName(sourceIndexName);
if (rolloverRequest.isDryRun()) {
listener.onResponse(
new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false));
new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults, true, false, false, false));
return;
}
if (conditionResults.size() == 0 || conditionResults.stream().anyMatch(result -> result.matched)) {
createIndexService.createIndex(prepareCreateIndexRequest(rolloverIndexName, rolloverRequest),
new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
// switch the alias to point to the newly created index
indexAliasesService.indicesAliases(
prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
rolloverRequest),
new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) {
listener.onResponse(
new RolloverResponse(sourceIndexName, rolloverIndexName,
conditionResults, false, true));
}
@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}
@Override
public void onFailure(Exception t) {
listener.onFailure(t);
}
});
CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(rolloverIndexName, rolloverRequest);
createIndexService.createIndex(updateRequest, ActionListener.wrap(createIndexClusterStateUpdateResponse -> {
// switch the alias to point to the newly created index
indexAliasesService.indicesAliases(
prepareRolloverAliasesUpdateRequest(sourceIndexName, rolloverIndexName,
rolloverRequest),
ActionListener.wrap(aliasClusterStateUpdateResponse -> {
if (aliasClusterStateUpdateResponse.isAcknowledged()) {
activeShardsObserver.waitForActiveShards(rolloverIndexName,
rolloverRequest.getCreateIndexRequest().waitForActiveShards(),
rolloverRequest.masterNodeTimeout(),
isShardsAcked -> listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName,
conditionResults, false, true, true, isShardsAcked)),
listener::onFailure);
} else {
listener.onResponse(new RolloverResponse(sourceIndexName, rolloverIndexName, conditionResults,
false, true, false, false));
}
}, listener::onFailure));
}, listener::onFailure));
} else {
// conditions not met
listener.onResponse(
new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false)
new RolloverResponse(sourceIndexName, sourceIndexName, conditionResults, false, false, false, false)
);
}
}
@ -216,6 +211,7 @@ public class TransportRolloverAction extends TransportMasterNodeAction<RolloverR
.masterNodeTimeout(createIndexRequest.masterNodeTimeout())
.settings(createIndexRequest.settings())
.aliases(createIndexRequest.aliases())
.waitForActiveShards(ActiveShardCount.NONE) // not waiting for shards here, will wait on the alias switch operation
.mappings(createIndexRequest.mappings());
}

View File

@ -28,6 +28,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.health.ClusterShardHealth;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
@ -93,12 +94,14 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
logger.trace("using cluster state version [{}] to determine shards", state.version());
// collect relevant shard ids of the requested indices for fetching store infos
for (String index : concreteIndices) {
IndexMetaData indexMetaData = state.metaData().index(index);
IndexRoutingTable indexShardRoutingTables = routingTables.index(index);
if (indexShardRoutingTables == null) {
continue;
}
for (IndexShardRoutingTable routing : indexShardRoutingTables) {
ClusterShardHealth shardHealth = new ClusterShardHealth(routing.shardId().id(), routing);
final int shardId = routing.shardId().id();
ClusterShardHealth shardHealth = new ClusterShardHealth(shardId, routing, indexMetaData);
if (request.shardStatuses().contains(shardHealth.getStatus())) {
shardIdsToFetch.add(routing.shardId());
}

View File

@ -22,6 +22,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.ParseField;
@ -36,7 +37,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -126,6 +126,24 @@ public class ShrinkRequest extends AcknowledgedRequest<ShrinkRequest> implements
return sourceIndex;
}
/**
* Sets the number of shard copies that should be active for creation of the
* new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.getShrinkIndexRequest().waitForActiveShards(waitForActiveShards);
}
public void source(BytesReference source) {
XContentType xContentType = XContentFactory.xContentType(source);
if (xContentType != null) {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.admin.indices.shrink;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.settings.Settings;
@ -44,4 +45,23 @@ public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder<ShrinkReque
this.request.getShrinkIndexRequest().settings(settings);
return this;
}
/**
* Sets the number of shard copies that should be active for creation of the
* new shrunken index to return. Defaults to {@link ActiveShardCount#DEFAULT}, which will
* wait for one shard copy (the primary) to become active. Set this value to
* {@link ActiveShardCount#ALL} to wait for all shards (primary and all replicas) to be active
* before returning. Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
* non-negative integer, up to the number of copies per shard (number of replicas + 1),
* to wait for the desired number of shard copies to become active before returning.
* Index creation will only wait up until the timeout value for the number of shard copies
* to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to
* determine if the requisite shard copies were all started before returning or timing out.
*
* @param waitForActiveShards number of active shard copies to wait on
*/
public ShrinkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) {
this.request.setWaitForActiveShards(waitForActiveShards);
return this;
}
}
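And the shrink variant, under the same assumptions about the admin-client entry point; the index names are illustrative.
ShrinkResponse resp = client.admin().indices().prepareShrinkIndex("source-index", "target-index")
    .setWaitForActiveShards(ActiveShardCount.ONE)  // wait for each target primary only
    .get();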

View File

@ -25,7 +25,7 @@ public final class ShrinkResponse extends CreateIndexResponse {
ShrinkResponse() {
}
ShrinkResponse(boolean acknowledged) {
super(acknowledged);
ShrinkResponse(boolean acknowledged, boolean shardsAcked) {
super(acknowledged, shardsAcked);
}
}

View File

@ -29,7 +29,6 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -40,7 +39,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -93,22 +91,8 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i);
return shard == null ? null : shard.getPrimary().getDocs();
}, indexNameExpressionResolver);
createIndexService.createIndex(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
listener.onResponse(new ShrinkResponse(response.isAcknowledged()));
}
@Override
public void onFailure(Exception t) {
if (t instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create shrink index", t, updateRequest.index());
} else {
logger.debug("[{}] failed to create shrink index", t, updateRequest.index());
}
listener.onFailure(t);
}
});
createIndexService.createIndex(updateRequest, ActionListener.wrap(response ->
listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked())), listener::onFailure));
}
@Override
@ -162,6 +146,7 @@ public class TransportShrinkAction extends TransportMasterNodeAction<ShrinkReque
.settings(targetIndex.settings())
.aliases(targetIndex.aliases())
.customs(targetIndex.customs())
.waitForActiveShards(targetIndex.waitForActiveShards())
.shrinkFrom(metaData.getIndex());
}

View File

@ -119,29 +119,21 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (needToCheck()) {
// Keep track of all unique indices for the create index requests:
final Map<String, Set<String>> indicesAndTypes = new HashMap<>();
final Set<String> autoCreateIndices = new HashSet<>();
for (ActionRequest request : bulkRequest.requests) {
if (request instanceof DocumentRequest) {
DocumentRequest req = (DocumentRequest) request;
Set<String> types = indicesAndTypes.get(req.index());
if (types == null) {
indicesAndTypes.put(req.index(), types = new HashSet<>());
}
types.add(req.type());
autoCreateIndices.add(req.index());
} else {
throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName());
}
}
final AtomicInteger counter = new AtomicInteger(indicesAndTypes.size());
final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
ClusterState state = clusterService.state();
for (Map.Entry<String, Set<String>> entry : indicesAndTypes.entrySet()) {
final String index = entry.getKey();
for (String index : autoCreateIndices) {
if (shouldAutoCreate(index, state)) {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
createIndexRequest.index(index);
for (String type : entry.getValue()) {
createIndexRequest.mapping(type);
}
createIndexRequest.cause("auto(bulk api)");
createIndexRequest.masterNodeTimeout(bulkRequest.timeout());
createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {

View File

@ -91,7 +91,6 @@ public class TransportIndexAction extends TransportWriteAction<IndexRequest, Ind
if (autoCreateIndex.shouldAutoCreate(request.index(), state)) {
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
createIndexRequest.index(request.index());
createIndexRequest.mapping(request.type());
createIndexRequest.cause("auto(index api)");
createIndexRequest.masterNodeTimeout(request.timeout());
createIndexAction.execute(task, createIndexRequest, new ActionListener<CreateIndexResponse>() {

View File

@ -19,6 +19,7 @@
package org.elasticsearch.action.ingest;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -29,47 +30,48 @@ import org.elasticsearch.ingest.IngestDocument;
import java.io.IOException;
public class SimulateProcessorResult implements Writeable, ToXContent {
class SimulateProcessorResult implements Writeable, ToXContent {
private final String processorTag;
private final WriteableIngestDocument ingestDocument;
private final Exception failure;
public SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
SimulateProcessorResult(String processorTag, IngestDocument ingestDocument, Exception failure) {
this.processorTag = processorTag;
this.ingestDocument = new WriteableIngestDocument(ingestDocument);
this.failure = null;
this.ingestDocument = (ingestDocument == null) ? null : new WriteableIngestDocument(ingestDocument);
this.failure = failure;
}
public SimulateProcessorResult(String processorTag, Exception failure) {
this.processorTag = processorTag;
this.failure = failure;
this.ingestDocument = null;
SimulateProcessorResult(String processorTag, IngestDocument ingestDocument) {
this(processorTag, ingestDocument, null);
}
SimulateProcessorResult(String processorTag, Exception failure) {
this(processorTag, null, failure);
}
/**
* Read from a stream.
*/
public SimulateProcessorResult(StreamInput in) throws IOException {
SimulateProcessorResult(StreamInput in) throws IOException {
this.processorTag = in.readString();
if (in.readBoolean()) {
this.failure = in.readException();
this.ingestDocument = null;
} else {
this.ingestDocument = new WriteableIngestDocument(in);
this.failure = null;
} else {
this.ingestDocument = null;
}
this.failure = in.readException();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(processorTag);
if (failure == null) {
if (ingestDocument == null) {
out.writeBoolean(false);
ingestDocument.writeTo(out);
} else {
out.writeBoolean(true);
out.writeException(failure);
ingestDocument.writeTo(out);
}
out.writeException(failure);
}
public IngestDocument getIngestDocument() {
@ -90,14 +92,23 @@ public class SimulateProcessorResult implements Writeable, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (processorTag != null) {
builder.field(ConfigurationUtils.TAG_KEY, processorTag);
}
if (failure == null) {
ingestDocument.toXContent(builder, params);
} else {
if (failure != null && ingestDocument != null) {
builder.startObject("ignored_error");
ElasticsearchException.renderException(builder, params, failure);
builder.endObject();
} else if (failure != null) {
ElasticsearchException.renderException(builder, params, failure);
}
if (ingestDocument != null) {
ingestDocument.toXContent(builder, params);
}
builder.endObject();
return builder;
}

View File

@ -24,7 +24,6 @@ import org.elasticsearch.ingest.IngestDocument;
import org.elasticsearch.ingest.Processor;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
@ -49,7 +48,7 @@ public final class TrackingResultProcessor implements Processor {
processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
} catch (Exception e) {
if (ignoreFailure) {
processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument)));
processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), new IngestDocument(ingestDocument), e));
} else {
processorResultList.add(new SimulateProcessorResult(actualProcessor.getTag(), e));
}

View File

@ -0,0 +1,211 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support;
import com.carrotsearch.hppc.cursors.IntObjectCursor;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
/**
* A class whose instances represent the required number of active
* shard copies for a given shard in an index.
*/
public final class ActiveShardCount implements Writeable {
private static final int ACTIVE_SHARD_COUNT_DEFAULT = -2;
private static final int ALL_ACTIVE_SHARDS = -1;
public static final ActiveShardCount DEFAULT = new ActiveShardCount(ACTIVE_SHARD_COUNT_DEFAULT);
public static final ActiveShardCount ALL = new ActiveShardCount(ALL_ACTIVE_SHARDS);
public static final ActiveShardCount NONE = new ActiveShardCount(0);
public static final ActiveShardCount ONE = new ActiveShardCount(1);
private final int value;
private ActiveShardCount(final int value) {
this.value = value;
}
/**
* Get an ActiveShardCount instance for the given value. The value is first validated to ensure
* it is a valid shard count; an IllegalArgumentException is thrown if validation fails. Valid
* values are any non-negative number. Directly use {@link ActiveShardCount#DEFAULT} for the
* default value (which is one shard copy) or {@link ActiveShardCount#ALL} to specify all the shards.
*/
public static ActiveShardCount from(final int value) {
if (value < 0) {
throw new IllegalArgumentException("shard count cannot be a negative value");
}
return get(value);
}
private static ActiveShardCount get(final int value) {
switch (validateValue(value)) {
case ACTIVE_SHARD_COUNT_DEFAULT:
return DEFAULT;
case ALL_ACTIVE_SHARDS:
return ALL;
case 1:
return ONE;
case 0:
return NONE;
default:
return new ActiveShardCount(value);
}
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
out.writeInt(value);
}
public static ActiveShardCount readFrom(final StreamInput in) throws IOException {
return get(in.readInt());
}
private static int validateValue(final int value) {
if (value < 0 && value != ACTIVE_SHARD_COUNT_DEFAULT && value != ALL_ACTIVE_SHARDS) {
throw new IllegalArgumentException("Invalid ActiveShardCount[" + value + "]");
}
return value;
}
/**
* Resolve this instance to an actual integer value for the number of active shard copies.
* If {@link ActiveShardCount#ALL} is specified, then the given {@link IndexMetaData} is
* used to determine what the actual active shard count should be. The default value indicates
* one active shard.
*/
public int resolve(final IndexMetaData indexMetaData) {
if (this == ActiveShardCount.DEFAULT) {
return 1;
} else if (this == ActiveShardCount.ALL) {
return indexMetaData.getNumberOfReplicas() + 1;
} else {
return value;
}
}
/**
* Parses the active shard count from the given string. Valid values are "all" for
* all shard copies, null for the default value (which defaults to one shard copy),
* or a numeric value greater than or equal to 0. Any other input will throw an
* IllegalArgumentException.
*/
public static ActiveShardCount parseString(final String str) {
if (str == null) {
return ActiveShardCount.DEFAULT;
} else if (str.equals("all")) {
return ActiveShardCount.ALL;
} else {
int val;
try {
val = Integer.parseInt(str);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("cannot parse ActiveShardCount[" + str + "]", e);
}
return ActiveShardCount.from(val);
}
}
/**
* Returns true iff the given cluster state's routing table contains enough active
* shards to meet the required shard count represented by this instance.
*/
public boolean enoughShardsActive(final ClusterState clusterState, final String indexName) {
if (this == ActiveShardCount.NONE) {
// not waiting for any active shards
return true;
}
final IndexMetaData indexMetaData = clusterState.metaData().index(indexName);
if (indexMetaData == null) {
// it's possible the index was deleted while waiting for active shard copies,
// in which case we'll just consider that we have enough active shard copies
// and we can stop waiting
return true;
}
final IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexName);
assert indexRoutingTable != null;
if (indexRoutingTable.allPrimaryShardsActive() == false) {
// all primary shards aren't active yet
return false;
}
for (final IntObjectCursor<IndexShardRoutingTable> shardRouting : indexRoutingTable.getShards()) {
if (enoughShardsActive(shardRouting.value, indexMetaData) == false) {
// not enough active shard copies yet
return false;
}
}
return true;
}
/**
* Returns true iff the active shard count in the shard routing table is enough
* to meet the required shard count represented by this instance.
*/
public boolean enoughShardsActive(final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
if (shardRoutingTable.activeShards().size() < resolve(indexMetaData)) {
// not enough active shard copies yet
return false;
}
return true;
}
@Override
public int hashCode() {
return Integer.hashCode(value);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
@SuppressWarnings("unchecked") ActiveShardCount that = (ActiveShardCount) o;
return value == that.value;
}
@Override
public String toString() {
final String valStr;
switch (value) {
case ALL_ACTIVE_SHARDS:
valStr = "ALL";
break;
case ACTIVE_SHARD_COUNT_DEFAULT:
valStr = "DEFAULT";
break;
default:
valStr = Integer.toString(value);
}
return "ActiveShardCount[" + valStr + "]";
}
}
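To make the value semantics concrete, a few illustrative calls and what they resolve to under the code above:
ActiveShardCount.parseString(null);   // DEFAULT: resolves to 1 (just the primary)
ActiveShardCount.parseString("all");  // ALL: resolves to numberOfReplicas + 1
ActiveShardCount.from(0);             // NONE: no waiting at all
ActiveShardCount.from(2);             // wait for two active copies of each shard
ActiveShardCount.from(-1);            // throws IllegalArgumentException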

View File

@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.node.NodeClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.function.Consumer;
/**
* This class provides primitives for waiting for a configured number of shards
* to become active before sending a response on an {@link ActionListener}.
*/
public class ActiveShardsObserver extends AbstractComponent {
private final ClusterService clusterService;
private final ThreadPool threadPool;
public ActiveShardsObserver(final Settings settings, final ClusterService clusterService, final ThreadPool threadPool) {
super(settings);
this.clusterService = clusterService;
this.threadPool = threadPool;
}
/**
* Waits on the specified number of active shards to be started before executing the
*
* @param indexName the index to wait for active shards on
* @param activeShardCount the number of active shards to wait on before returning
* @param timeout the timeout value
* @param onResult a function that is executed in response to the requisite shards becoming active or a timeout (whichever comes first)
* @param onFailure a function that is executed in response to an error occurring during waiting for the active shards
*/
public void waitForActiveShards(final String indexName,
final ActiveShardCount activeShardCount,
final TimeValue timeout,
final Consumer<Boolean> onResult,
final Consumer<Exception> onFailure) {
// wait for the configured number of active shards to be allocated before executing the result consumer
if (activeShardCount == ActiveShardCount.NONE) {
// not waiting, so just run whatever we were to run when the waiting is done
onResult.accept(true);
return;
}
final ClusterStateObserver observer = new ClusterStateObserver(clusterService, logger, threadPool.getThreadContext());
if (activeShardCount.enoughShardsActive(observer.observedState(), indexName)) {
onResult.accept(true);
} else {
final ClusterStateObserver.ChangePredicate shardsAllocatedPredicate =
new ClusterStateObserver.ValidationPredicate() {
@Override
protected boolean validate(final ClusterState newState) {
return activeShardCount.enoughShardsActive(newState, indexName);
}
};
final ClusterStateObserver.Listener observerListener = new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
onResult.accept(true);
}
@Override
public void onClusterServiceClose() {
logger.debug("[{}] cluster service closed while waiting for enough shards to be started.", indexName);
onFailure.accept(new NodeClosedException(clusterService.localNode()));
}
@Override
public void onTimeout(TimeValue timeout) {
onResult.accept(false);
}
};
observer.waitForNextChange(observerListener, shardsAllocatedPredicate, timeout);
}
}
}
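A sketch of how a transport action can use the observer, mirroring TransportRolloverAction above; the observer, request, listener and buildResponse factory are assumed from the enclosing action.
activeShardsObserver.waitForActiveShards("my-index", ActiveShardCount.ALL, request.timeout(),
    shardsAcked -> listener.onResponse(buildResponse(shardsAcked)),  // buildResponse is hypothetical
    listener::onFailure);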

View File

@ -59,7 +59,7 @@ public abstract class HandledTransportAction<Request extends ActionRequest<Reque
@Override
public final void messageReceived(final Request request, final TransportChannel channel, Task task) throws Exception {
// We already got the task created on the netty layer - no need to create it again on the transport layer
// We already got the task created on the network layer - no need to create it again on the transport layer
execute(task, request, new ActionListener<Response>() {
@Override
public void onResponse(Response response) {

View File

@ -39,8 +39,6 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.ScriptService.ScriptType;
@ -637,8 +635,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
}
public UpdateRequest source(BytesReference source) throws Exception {
ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
Map<String, Object> scriptParams = null;
Script script = null;
try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
XContentParser.Token token = parser.nextToken();
@ -649,11 +645,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("script".equals(currentFieldName) && token == XContentParser.Token.START_OBJECT) {
// here we don't have settings available, so we are unable to throw strict deprecation exceptions
} else if ("script".equals(currentFieldName)) {
script = Script.parse(parser, ParseFieldMatcher.EMPTY);
} else if ("params".equals(currentFieldName)) {
scriptParams = parser.map();
} else if ("scripted_upsert".equals(currentFieldName)) {
scriptedUpsert = parser.booleanValue();
} else if ("upsert".equals(currentFieldName)) {
@ -680,16 +673,6 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
if (fields != null) {
fields(fields.toArray(new String[fields.size()]));
}
} else {
// here we don't have settings available, so we are unable to throw deprecation exceptions
scriptParameterParser.token(currentFieldName, token, parser, ParseFieldMatcher.EMPTY);
}
}
// Don't have a script using the new API so see if it is specified with the old API
if (script == null) {
ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue();
if (scriptValue != null) {
script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), scriptParams);
}
}
if (script != null) {

View File

@ -104,6 +104,13 @@ final class BootstrapCheck {
final List<String> errors = new ArrayList<>();
final List<String> ignoredErrors = new ArrayList<>();
if (enforceLimits) {
logger.info("bound or publishing to a non-loopback or non-link-local address, enforcing bootstrap checks");
}
if (enforceLimits && ignoreSystemChecks) {
logger.warn("enforcing bootstrap checks but ignoring system bootstrap checks, consider not ignoring system checks");
}
for (final Check check : checks) {
if (check.check()) {
if ((!enforceLimits || (check.isSystemCheck() && ignoreSystemChecks)) && !check.alwaysEnforce()) {

View File

@ -285,7 +285,7 @@ final class Security {
}
// loop through all profiles and add permissions for each one, if its valid.
// (otherwise NettyTransport is lenient and ignores it)
// (otherwise Netty transports are lenient and ignore it)
for (Map.Entry<String, Settings> entry : profiles.entrySet()) {
Settings profileSettings = entry.getValue();
String name = entry.getKey();

View File

@ -21,6 +21,9 @@ package org.elasticsearch.client.transport;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
@ -53,6 +56,7 @@ import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.PluginsService;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.threadpool.ExecutorBuilder;
import org.elasticsearch.threadpool.ThreadPool;
@ -66,116 +70,120 @@ import org.elasticsearch.transport.TransportService;
* The most important module the transport client uses is the {@link org.elasticsearch.common.network.NetworkModule}, which is
* started in client mode (only connects, no bind).
*/
public class TransportClient extends AbstractClient {
public abstract class TransportClient extends AbstractClient {
/**
* Handy method to create a {@link org.elasticsearch.client.transport.TransportClient.Builder}.
*/
public static Builder builder() {
return new Builder();
private static PluginsService newPluginService(final Settings settings, Collection<Class<? extends Plugin>> plugins) {
final Settings.Builder settingsBuilder = Settings.builder()
.put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put(NetworkService.NETWORK_SERVER.getKey(), false)
.put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, plugins);
}
/**
* A builder used to create an instance of the transport client.
*/
public static class Builder {
protected static Collection<Class<? extends Plugin>> addPlugins(Collection<Class<? extends Plugin>> collection,
Class<? extends Plugin>... plugins) {
return addPlugins(collection, Arrays.asList(plugins));
}
private Settings providedSettings = Settings.EMPTY;
private List<Class<? extends Plugin>> pluginClasses = new ArrayList<>();
/**
* The settings to configure the transport client with.
*/
public Builder settings(Settings.Builder settings) {
return settings(settings.build());
}
/**
* The settings to configure the transport client with.
*/
public Builder settings(Settings settings) {
this.providedSettings = settings;
return this;
}
/**
* Add the given plugin to the client when it is created.
*/
public Builder addPlugin(Class<? extends Plugin> pluginClass) {
pluginClasses.add(pluginClass);
return this;
}
private PluginsService newPluginService(final Settings settings) {
final Settings.Builder settingsBuilder = Settings.builder()
.put(TcpTransport.PING_SCHEDULE.getKey(), "5s") // enable by default the transport schedule ping interval
.put(InternalSettingsPreparer.prepareSettings(settings))
.put(NetworkService.NETWORK_SERVER.getKey(), false)
.put(CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE);
return new PluginsService(settingsBuilder.build(), null, null, pluginClasses);
}
/**
* Builds a new instance of the transport client.
*/
public TransportClient build() {
final PluginsService pluginsService = newPluginService(providedSettings);
final Settings settings = pluginsService.updatedSettings();
final List<Closeable> resourcesToClose = new ArrayList<>();
final ThreadPool threadPool = new ThreadPool(settings);
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
final NetworkService networkService = new NetworkService(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
try {
final List<Setting<?>> additionalSettings = new ArrayList<>();
final List<String> additionalSettingsFilter = new ArrayList<>();
additionalSettings.addAll(pluginsService.getPluginSettings());
additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
additionalSettings.addAll(builder.getRegisteredSettings());
}
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
ModulesBuilder modules = new ModulesBuilder();
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.nodeModules()) {
modules.add(pluginModule);
}
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
modules.add(new SearchModule(settings, namedWriteableRegistry, true));
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);
pluginsService.processModules(modules);
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
modules.add((b -> {
b.bind(BigArrays.class).toInstance(bigArrays);
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
}));
Injector injector = modules.createInjector();
final TransportService transportService = injector.getInstance(TransportService.class);
final TransportClientNodesService nodesService =
new TransportClientNodesService(settings, transportService, threadPool);
final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));
transportService.start();
transportService.acceptIncomingRequests();
TransportClient transportClient = new TransportClient(injector, nodesService, proxy);
resourcesToClose.clear();
return transportClient;
} finally {
IOUtils.closeWhileHandlingException(resourcesToClose);
protected static Collection<Class<? extends Plugin>> addPlugins(Collection<Class<? extends Plugin>> collection,
Collection<Class<? extends Plugin>> plugins) {
ArrayList<Class<? extends Plugin>> list = new ArrayList<>(collection);
for (Class<? extends Plugin> p : plugins) {
if (list.contains(p)) {
throw new IllegalArgumentException("plugin already exists: " + p);
}
list.add(p);
}
return list;
}
private static ClientTemplate buildTemplate(Settings providedSettings, Settings defaultSettings,
Collection<Class<? extends Plugin>> plugins) {
final PluginsService pluginsService = newPluginService(providedSettings, plugins);
final Settings settings = Settings.builder().put(defaultSettings).put(pluginsService.updatedSettings()).build();
final List<Closeable> resourcesToClose = new ArrayList<>();
final ThreadPool threadPool = new ThreadPool(settings);
resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS));
final NetworkService networkService = new NetworkService(settings);
NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
try {
final List<Setting<?>> additionalSettings = new ArrayList<>();
final List<String> additionalSettingsFilter = new ArrayList<>();
additionalSettings.addAll(pluginsService.getPluginSettings());
additionalSettingsFilter.addAll(pluginsService.getPluginSettingsFilter());
for (final ExecutorBuilder<?> builder : threadPool.builders()) {
additionalSettings.addAll(builder.getRegisteredSettings());
}
SettingsModule settingsModule = new SettingsModule(settings, additionalSettings, additionalSettingsFilter);
ModulesBuilder modules = new ModulesBuilder();
// plugin modules must be added here, before others or we can get crazy injection errors...
for (Module pluginModule : pluginsService.createGuiceModules()) {
modules.add(pluginModule);
}
modules.add(new NetworkModule(networkService, settings, true, namedWriteableRegistry));
modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
modules.add(new SearchModule(settings, namedWriteableRegistry, true, pluginsService.filterPlugins(SearchPlugin.class)));
ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
pluginsService.filterPlugins(ActionPlugin.class));
modules.add(actionModule);
pluginsService.processModules(modules);
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
modules.add((b -> {
b.bind(BigArrays.class).toInstance(bigArrays);
b.bind(PluginsService.class).toInstance(pluginsService);
b.bind(CircuitBreakerService.class).toInstance(circuitBreakerService);
}));
Injector injector = modules.createInjector();
final TransportService transportService = injector.getInstance(TransportService.class);
final TransportClientNodesService nodesService =
new TransportClientNodesService(settings, transportService, threadPool);
final TransportProxyClient proxy = new TransportProxyClient(settings, transportService, nodesService,
actionModule.getActions().values().stream().map(x -> x.getAction()).collect(Collectors.toList()));
List<LifecycleComponent> pluginLifecycleComponents = new ArrayList<>();
pluginLifecycleComponents.addAll(pluginsService.getGuiceServiceClasses().stream()
.map(injector::getInstance).collect(Collectors.toList()));
resourcesToClose.addAll(pluginLifecycleComponents);
transportService.start();
transportService.acceptIncomingRequests();
ClientTemplate transportClient = new ClientTemplate(injector, pluginLifecycleComponents, nodesService, proxy);
resourcesToClose.clear();
return transportClient;
} finally {
IOUtils.closeWhileHandlingException(resourcesToClose);
}
}
private static final class ClientTemplate {
final Injector injector;
private final List<LifecycleComponent> pluginLifecycleComponents;
private final TransportClientNodesService nodesService;
private final TransportProxyClient proxy;
private ClientTemplate(Injector injector, List<LifecycleComponent> pluginLifecycleComponents, TransportClientNodesService nodesService, TransportProxyClient proxy) {
this.injector = injector;
this.pluginLifecycleComponents = pluginLifecycleComponents;
this.nodesService = nodesService;
this.proxy = proxy;
}
Settings getSettings() {
return injector.getInstance(Settings.class);
}
ThreadPool getThreadPool() {
return injector.getInstance(ThreadPool.class);
}
}
@ -183,14 +191,33 @@ public class TransportClient extends AbstractClient {
final Injector injector;
private final List<LifecycleComponent> pluginLifecycleComponents;
private final TransportClientNodesService nodesService;
private final TransportProxyClient proxy;
private TransportClient(Injector injector, TransportClientNodesService nodesService, TransportProxyClient proxy) {
super(injector.getInstance(Settings.class), injector.getInstance(ThreadPool.class));
this.injector = injector;
this.nodesService = nodesService;
this.proxy = proxy;
/**
* Creates a new TransportClient with the given settings and plugins
*/
public TransportClient(Settings settings, Collection<Class<? extends Plugin>> plugins) {
this(buildTemplate(settings, Settings.EMPTY, plugins));
}
/**
* Creates a new TransportClient with the given settings, defaults and plugins.
* @param settings the client settings
* @param defaultSettings default settings that are merged in after the plugins have added their additional settings.
* @param plugins the client plugins
*/
protected TransportClient(Settings settings, Settings defaultSettings, Collection<Class<? extends Plugin>> plugins) {
this(buildTemplate(settings, defaultSettings, plugins));
}
private TransportClient(ClientTemplate template) {
super(template.getSettings(), template.getThreadPool());
this.injector = template.injector;
this.pluginLifecycleComponents = Collections.unmodifiableList(template.pluginLifecycleComponents);
this.nodesService = template.nodesService;
this.proxy = template.proxy;
}
/**
@ -269,8 +296,8 @@ public class TransportClient extends AbstractClient {
closeables.add(nodesService);
closeables.add(injector.getInstance(TransportService.class));
for (Class<? extends LifecycleComponent> plugin : injector.getInstance(PluginsService.class).nodeServices()) {
closeables.add(injector.getInstance(plugin));
for (LifecycleComponent plugin : pluginLifecycleComponents) {
closeables.add(plugin);
}
closeables.add(() -> ThreadPool.terminate(injector.getInstance(ThreadPool.class), 10, TimeUnit.SECONDS));
closeables.add(injector.getInstance(BigArrays.class));
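With the builder removed, callers now go through a concrete subclass that hands plugins to the new constructors. A minimal sketch; the subclass below is hypothetical, and only the TransportClient constructor and addTransportAddress come from this file:

import java.net.InetAddress;
import java.util.Collections;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

// Hypothetical subclass; TransportClient itself is abstract now.
class MyTransportClient extends TransportClient {
    MyTransportClient(Settings settings) {
        super(settings, Collections.emptyList()); // no additional plugins
    }
}

TransportClient client = new MyTransportClient(Settings.EMPTY);
client.addTransportAddress(
    new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), 9300));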

View File

@ -19,16 +19,26 @@
package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
/**
* ClusterInfo is an object representing a map of nodes to {@link DiskUsage}
* and a map of shard ids to shard sizes, see
* <code>InternalClusterInfoService.shardIdentifierFromRouting(String)</code>
* for the key used in the shardSizes map
*/
public class ClusterInfo {
public class ClusterInfo implements ToXContent, Writeable {
private final ImmutableOpenMap<String, DiskUsage> leastAvailableSpaceUsage;
private final ImmutableOpenMap<String, DiskUsage> mostAvailableSpaceUsage;
final ImmutableOpenMap<String, Long> shardSizes;
@ -57,6 +67,105 @@ public class ClusterInfo {
this.routingToDataPath = routingToDataPath;
}
public ClusterInfo(StreamInput in) throws IOException {
int size = in.readVInt();
Map<String, DiskUsage> leastMap = new HashMap<>(size);
for (int i = 0; i < size; i++) {
leastMap.put(in.readString(), new DiskUsage(in));
}
size = in.readVInt();
Map<String, DiskUsage> mostMap = new HashMap<>(size);
for (int i = 0; i < size; i++) {
mostMap.put(in.readString(), new DiskUsage(in));
}
size = in.readVInt();
Map<String, Long> sizeMap = new HashMap<>(size);
for (int i = 0; i < size; i++) {
sizeMap.put(in.readString(), in.readLong());
}
size = in.readVInt();
Map<ShardRouting, String> routingMap = new HashMap<>(size);
for (int i = 0; i < size; i++) {
routingMap.put(new ShardRouting(in), in.readString());
}
ImmutableOpenMap.Builder<String, DiskUsage> leastBuilder = ImmutableOpenMap.builder();
this.leastAvailableSpaceUsage = leastBuilder.putAll(leastMap).build();
ImmutableOpenMap.Builder<String, DiskUsage> mostBuilder = ImmutableOpenMap.builder();
this.mostAvailableSpaceUsage = mostBuilder.putAll(mostMap).build();
ImmutableOpenMap.Builder<String, Long> sizeBuilder = ImmutableOpenMap.builder();
this.shardSizes = sizeBuilder.putAll(sizeMap).build();
ImmutableOpenMap.Builder<ShardRouting, String> routingBuilder = ImmutableOpenMap.builder();
this.routingToDataPath = routingBuilder.putAll(routingMap).build();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(this.leastAvailableSpaceUsage.size());
for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
out.writeString(c.key);
c.value.writeTo(out);
}
out.writeVInt(this.mostAvailableSpaceUsage.size());
for (ObjectObjectCursor<String, DiskUsage> c : this.mostAvailableSpaceUsage) {
out.writeString(c.key);
c.value.writeTo(out);
}
out.writeVInt(this.shardSizes.size());
for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
out.writeString(c.key);
if (c.value == null) {
out.writeLong(-1);
} else {
out.writeLong(c.value);
}
}
out.writeVInt(this.routingToDataPath.size());
for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
c.key.writeTo(out);
out.writeString(c.value);
}
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject("nodes"); {
for (ObjectObjectCursor<String, DiskUsage> c : this.leastAvailableSpaceUsage) {
builder.startObject(c.key); { // node
builder.field("node_name", c.value.getNodeName());
builder.startObject("least_available"); {
c.value.toShortXContent(builder, params);
}
builder.endObject(); // end "least_available"
builder.startObject("most_available"); {
DiskUsage most = this.mostAvailableSpaceUsage.get(c.key);
if (most != null) {
most.toShortXContent(builder, params);
}
}
builder.endObject(); // end "most_available"
}
builder.endObject(); // end $nodename
}
}
builder.endObject(); // end "nodes"
builder.startObject("shard_sizes"); {
for (ObjectObjectCursor<String, Long> c : this.shardSizes) {
builder.byteSizeField(c.key + "_bytes", c.key, c.value);
}
}
builder.endObject(); // end "shard_sizes"
builder.startObject("shard_paths"); {
for (ObjectObjectCursor<ShardRouting, String> c : this.routingToDataPath) {
builder.field(c.key.toString(), c.value);
}
}
builder.endObject(); // end "shard_paths"
return builder;
}
/**
* Returns a node id to disk usage mapping for the path that has the least available space on the node.
*/
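Since ClusterInfo is now Writeable, it can round-trip through the stream API like any other cluster state piece. A sketch, assuming the four-map constructor shown above is public and that a StreamInput can be obtained from the output's bytes via the 5.x accessor:

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

// Empty maps keep the example small; real instances come from
// InternalClusterInfoService.
ClusterInfo original = new ClusterInfo(ImmutableOpenMap.of(), ImmutableOpenMap.of(),
    ImmutableOpenMap.of(), ImmutableOpenMap.of());
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);
ClusterInfo copy = new ClusterInfo(out.bytes().streamInput()); // assumed accessor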

View File

@ -20,12 +20,19 @@
package org.elasticsearch.cluster;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
/**
* Encapsulation class used to represent the amount of disk used on a node.
*/
public class DiskUsage {
public class DiskUsage implements ToXContent, Writeable {
final String nodeId;
final String nodeName;
final String path;
@ -44,6 +51,44 @@ public class DiskUsage {
this.path = path;
}
public DiskUsage(StreamInput in) throws IOException {
this.nodeId = in.readString();
this.nodeName = in.readString();
this.path = in.readString();
this.totalBytes = in.readVLong();
this.freeBytes = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(this.nodeId);
out.writeString(this.nodeName);
out.writeString(this.path);
out.writeVLong(this.totalBytes);
out.writeVLong(this.freeBytes);
}
private static double truncatePercent(double pct) {
return Math.round(pct * 10.0) / 10.0;
}
public XContentBuilder toShortXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("path", this.path);
builder.byteSizeField("total_bytes", "total", this.totalBytes);
builder.byteSizeField("used_bytes", "used", this.getUsedBytes());
builder.byteSizeField("free_bytes", "free", this.freeBytes);
builder.field("free_disk_percent", truncatePercent(this.getFreeDiskAsPercentage()));
builder.field("used_disk_percent", truncatePercent(this.getUsedDiskAsPercentage()));
return builder;
}
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field("node_id", this.nodeId);
builder.field("node_name", this.nodeName);
builder = toShortXContent(builder, params);
return builder;
}
public String getNodeId() {
return nodeId;
}
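The short form is what ClusterInfo embeds per node; on its own, DiskUsage renders a flat object. A sketch with made-up values, assuming the five-argument constructor implied by the stream constructor above and the 5.x XContentBuilder API:

import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

// 100 GB total with 40 GB free: free_disk_percent 40.0, used_disk_percent 60.0
DiskUsage usage = new DiskUsage("node-1", "node-1", "/var/data",
    100L * 1024 * 1024 * 1024, 40L * 1024 * 1024 * 1024);
XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
usage.toXContent(builder, ToXContent.EMPTY_PARAMS);
builder.endObject();
String json = builder.string(); // node_id, node_name, path, byte sizes, percentages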

View File

@ -19,18 +19,26 @@
package org.elasticsearch.cluster;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
/**
* Thrown when a node join request or a master ping reaches a node which is not
* currently acting as a master or when a cluster state update task is to be executed
* on a node that is no longer master.
*/
public class NotMasterException extends IllegalStateException {
public class NotMasterException extends ElasticsearchException {
public NotMasterException(String msg) {
super(msg);
}
public NotMasterException(StreamInput in) throws IOException {
super(in);
}
@Override
public Throwable fillInStackTrace() {
return null;

View File

@ -0,0 +1,40 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.ack;
/**
* A cluster state update response with specific fields for index creation.
*/
public class CreateIndexClusterStateUpdateResponse extends ClusterStateUpdateResponse {
private final boolean shardsAcked;
public CreateIndexClusterStateUpdateResponse(boolean acknowledged, boolean shardsAcked) {
super(acknowledged);
this.shardsAcked = shardsAcked;
}
/**
* Returns whether the requisite number of shard copies started before the completion of the operation.
*/
public boolean isShardsAcked() {
return shardsAcked;
}
}

View File

@ -54,7 +54,7 @@ public final class ClusterIndexHealth implements Iterable<ClusterShardHealth>, W
for (IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
int shardId = shardRoutingTable.shardId().id();
shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable));
shards.put(shardId, new ClusterShardHealth(shardId, shardRoutingTable, indexMetaData));
}
// update the index status

View File

@ -19,8 +19,12 @@
package org.elasticsearch.cluster.health;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.UnassignedInfo.Reason;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -37,13 +41,12 @@ public final class ClusterShardHealth implements Writeable {
private final int unassignedShards;
private final boolean primaryActive;
public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable) {
public ClusterShardHealth(final int shardId, final IndexShardRoutingTable shardRoutingTable, final IndexMetaData indexMetaData) {
this.shardId = shardId;
int computeActiveShards = 0;
int computeRelocatingShards = 0;
int computeInitializingShards = 0;
int computeUnassignedShards = 0;
boolean computePrimaryActive = false;
for (ShardRouting shardRouting : shardRoutingTable) {
if (shardRouting.active()) {
computeActiveShards++;
@ -51,9 +54,6 @@ public final class ClusterShardHealth implements Writeable {
// the shard is relocating, the one it is relocating to will be in initializing state, so we don't count it
computeRelocatingShards++;
}
if (shardRouting.primary()) {
computePrimaryActive = true;
}
} else if (shardRouting.initializing()) {
computeInitializingShards++;
} else if (shardRouting.unassigned()) {
@ -61,21 +61,22 @@ public final class ClusterShardHealth implements Writeable {
}
}
ClusterHealthStatus computeStatus;
if (computePrimaryActive) {
final ShardRouting primaryRouting = shardRoutingTable.primaryShard();
if (primaryRouting.active()) {
if (computeActiveShards == shardRoutingTable.size()) {
computeStatus = ClusterHealthStatus.GREEN;
} else {
computeStatus = ClusterHealthStatus.YELLOW;
}
} else {
computeStatus = ClusterHealthStatus.RED;
computeStatus = getInactivePrimaryHealth(primaryRouting, indexMetaData);
}
this.status = computeStatus;
this.activeShards = computeActiveShards;
this.relocatingShards = computeRelocatingShards;
this.initializingShards = computeInitializingShards;
this.unassignedShards = computeUnassignedShards;
this.primaryActive = computePrimaryActive;
this.primaryActive = primaryRouting.active();
}
public ClusterShardHealth(final StreamInput in) throws IOException {
@ -126,4 +127,36 @@ public final class ClusterShardHealth implements Writeable {
out.writeVInt(unassignedShards);
out.writeBoolean(primaryActive);
}
/**
* Checks if an inactive primary shard should cause the cluster health to go RED.
*
* Normally, an inactive primary shard in an index should cause the cluster health to be RED. However,
* there are exceptions where a health status of RED is inappropriate, namely in these scenarios:
* 1. Index Creation. When an index is first created, the primary shards are in the initializing state, so
* there is a small window where the cluster health is RED due to the primaries not being activated yet.
However, this leads to a false sense that the cluster is in an unhealthy state, when in reality, it's
simply a case of needing to wait for the primaries to initialize.
2. When a cluster is recovering and the shard never had any allocation ids assigned to it, which
indicates that the index was created but a cluster restart happened before the primary could be
allocated for this shard.
*
* Here, we check for these scenarios and set the cluster health to YELLOW if any are applicable.
*
* NB: this method should *not* be called on active shards nor on non-primary shards.
*/
public static ClusterHealthStatus getInactivePrimaryHealth(final ShardRouting shardRouting, final IndexMetaData indexMetaData) {
assert shardRouting.primary() : "cannot invoke on a replica shard: " + shardRouting;
assert shardRouting.active() == false : "cannot invoke on an active shard: " + shardRouting;
assert shardRouting.unassignedInfo() != null : "cannot invoke on a shard with no UnassignedInfo: " + shardRouting;
final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
if (unassignedInfo.getLastAllocationStatus() != AllocationStatus.DECIDERS_NO
&& shardRouting.allocatedPostIndexCreate(indexMetaData) == false
&& (unassignedInfo.getReason() == Reason.INDEX_CREATED || unassignedInfo.getReason() == Reason.CLUSTER_RECOVERED)) {
return ClusterHealthStatus.YELLOW;
} else {
return ClusterHealthStatus.RED;
}
}
}
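Reduced to a decision table, the method reads as below. This is a standalone restatement of the branches above, not ES API:

// Simplified paraphrase of getInactivePrimaryHealth.
static String inactivePrimaryHealth(boolean decidersSaidNo,
                                    boolean allocatedPostIndexCreate,
                                    boolean reasonIsCreatedOrRecovered) {
    if (!decidersSaidNo && !allocatedPostIndexCreate && reasonIsCreatedOrRecovered) {
        return "YELLOW"; // the primary just has not been allocated yet
    }
    return "RED"; // genuinely unhealthy: the primary should be active but is not
}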

View File

@ -27,9 +27,11 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.Alias;
import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest;
import org.elasticsearch.action.support.ActiveShardsObserver;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
@ -68,6 +70,7 @@ import org.elasticsearch.indices.IndexAlreadyExistsException;
import org.elasticsearch.indices.IndexCreationException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidIndexNameException;
import org.elasticsearch.threadpool.ThreadPool;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
@ -108,13 +111,15 @@ public class MetaDataCreateIndexService extends AbstractComponent {
private final Environment env;
private final NodeServicesProvider nodeServicesProvider;
private final IndexScopedSettings indexScopedSettings;
private final ActiveShardsObserver activeShardsObserver;
@Inject
public MetaDataCreateIndexService(Settings settings, ClusterService clusterService,
IndicesService indicesService, AllocationService allocationService,
AliasValidator aliasValidator,
Set<IndexTemplateFilter> indexTemplateFilters, Environment env, NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings) {
Set<IndexTemplateFilter> indexTemplateFilters, Environment env,
NodeServicesProvider nodeServicesProvider, IndexScopedSettings indexScopedSettings,
ThreadPool threadPool) {
super(settings);
this.clusterService = clusterService;
this.indicesService = indicesService;
@ -135,6 +140,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
this.indexTemplateFilter = new IndexTemplateFilter.Compound(templateFilters);
}
this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool);
}
public void validateIndexName(String index, ClusterState state) {
@ -176,7 +182,38 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
}
public void createIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
/**
* Creates an index in the cluster state and waits for the specified number of shard copies to
* become active (as specified in {@link CreateIndexClusterStateUpdateRequest#waitForActiveShards()})
* before sending the response on the listener. If the index creation was successfully applied on
* the cluster state, then {@link CreateIndexClusterStateUpdateResponse#isAcknowledged()} will return
* true, otherwise it will return false and no waiting will occur for started shards
* ({@link CreateIndexClusterStateUpdateResponse#isShardsAcked()} will also be false). If the index
* creation in the cluster state was successful and the requisite shard copies were started before
* the timeout, then {@link CreateIndexClusterStateUpdateResponse#isShardsAcked()} will
* return true, otherwise if the operation timed out, then it will return false.
*
* @param request the index creation cluster state update request
* @param listener the listener on which to send the index creation cluster state update response
*/
public void createIndex(final CreateIndexClusterStateUpdateRequest request,
final ActionListener<CreateIndexClusterStateUpdateResponse> listener) {
onlyCreateIndex(request, ActionListener.wrap(response -> {
if (response.isAcknowledged()) {
activeShardsObserver.waitForActiveShards(request.index(), request.waitForActiveShards(), request.ackTimeout(),
shardsAcked -> {
if (shardsAcked == false) {
logger.debug("[{}] index created, but the operation timed out while waiting for " +
"enough shards to be started.", request.index());
}
listener.onResponse(new CreateIndexClusterStateUpdateResponse(response.isAcknowledged(), shardsAcked));
}, listener::onFailure);
} else {
listener.onResponse(new CreateIndexClusterStateUpdateResponse(false, false));
}
}, listener::onFailure));
}
private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request,
final ActionListener<ClusterStateUpdateResponse> listener) {
Settings.Builder updatedSettingsBuilder = Settings.builder();
updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
indexScopedSettings.validate(updatedSettingsBuilder);
@ -308,6 +345,11 @@ public class MetaDataCreateIndexService extends AbstractComponent {
.setRoutingNumShards(routingNumShards);
// Set up everything, now locally create the index to see that things are ok, and apply
final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
if (request.waitForActiveShards().resolve(tmpImd) > tmpImd.getNumberOfReplicas() + 1) {
throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() +
"]: cannot be greater than number of shard copies [" +
(tmpImd.getNumberOfReplicas() + 1) + "]");
}
// create the index here (on the master) to validate it can be created, as well as adding the mapping
final IndexService indexService = indicesService.createIndex(nodeServicesProvider, tmpImd, Collections.emptyList());
createdIndex = indexService.index();
@ -408,6 +450,16 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}
}
}
@Override
public void onFailure(String source, Exception e) {
if (e instanceof IndexAlreadyExistsException) {
logger.trace("[{}] failed to create", e, request.index());
} else {
logger.debug("[{}] failed to create", e, request.index());
}
super.onFailure(source, e);
}
});
}
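From the caller's side the two-phase acknowledgement surfaces as a wait-for-active-shards option on index creation. A usage sketch, assuming a Client named client and that the request builder exposes a setter for ActiveShardCount in this API revision (the index name is made up):

import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.support.ActiveShardCount;

CreateIndexResponse response = client.admin().indices()
    .prepareCreate("my-index")
    .setWaitForActiveShards(ActiveShardCount.from(2)) // primary plus one replica
    .get();
// isAcknowledged(): the cluster state accepted the index; the shardsAcked flag
// reports whether the requested copies started before the timeout.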

View File

@ -48,23 +48,6 @@ import static org.elasticsearch.common.transport.TransportAddressSerializers.add
*/
public class DiscoveryNode implements Writeable, ToXContent {
public static boolean isLocalNode(Settings settings) {
if (Node.NODE_LOCAL_SETTING.exists(settings)) {
return Node.NODE_LOCAL_SETTING.get(settings);
}
if (Node.NODE_MODE_SETTING.exists(settings)) {
String nodeMode = Node.NODE_MODE_SETTING.get(settings);
if ("local".equals(nodeMode)) {
return true;
} else if ("network".equals(nodeMode)) {
return false;
} else {
throw new IllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network].");
}
}
return false;
}
public static boolean nodeRequiresLocalStorage(Settings settings) {
boolean localStorageEnable = Node.NODE_LOCAL_STORAGE_SETTING.get(settings);
if (localStorageEnable == false &&
@ -99,6 +82,24 @@ public class DiscoveryNode implements Writeable, ToXContent {
private final Version version;
private final Set<Role> roles;
/**
* Creates a new {@link DiscoveryNode}
* <p>
* <b>Note:</b> if the version of the node is unknown {@link Version#minimumCompatibilityVersion()} should be used for the current
* version. It corresponds to the minimum version this Elasticsearch version can communicate with. If a higher version is used
* the node might not be able to communicate with the remote node. After initial handshakes node versions will be discovered
* and updated.
* </p>
*
* @param id the node's unique (persistent) node id. This constructor auto-generates a random ephemeral id.
* @param address the node's transport address
* @param version the version of the node
*/
public DiscoveryNode(final String id, TransportAddress address, Version version) {
this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
}
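A sketch of the note above: when the remote node's version is unknown, construct the handle with the minimum compatibility version instead of Version.CURRENT (the node id and address are made up):

import java.net.InetAddress;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.transport.InetSocketTransportAddress;

// The real version is discovered during the handshake and updated afterwards.
DiscoveryNode node = new DiscoveryNode("node-1",
    new InetSocketTransportAddress(InetAddress.getLoopbackAddress(), 9300),
    Version.CURRENT.minimumCompatibilityVersion());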
/**
* Creates a new {@link DiscoveryNode}
* <p>

View File

@ -25,6 +25,7 @@ import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
@ -641,14 +642,27 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* Should be used with caution; typically
* the correct usage is to removeAndIgnore from the iterator.
* @see #ignored()
* @see UnassignedIterator#removeAndIgnore()
* @see UnassignedIterator#removeAndIgnore(AllocationStatus)
* @see #isIgnoredEmpty()
* @return true iff the decision caused a change to the unassigned info
*/
public void ignoreShard(ShardRouting shard) {
public boolean ignoreShard(ShardRouting shard, AllocationStatus allocationStatus) {
boolean changed = false;
if (shard.primary()) {
ignoredPrimaries++;
UnassignedInfo currInfo = shard.unassignedInfo();
assert currInfo != null;
if (allocationStatus.equals(currInfo.getLastAllocationStatus()) == false) {
UnassignedInfo newInfo = new UnassignedInfo(currInfo.getReason(), currInfo.getMessage(), currInfo.getFailure(),
currInfo.getNumFailedAllocations(), currInfo.getUnassignedTimeInNanos(),
currInfo.getUnassignedTimeInMillis(), currInfo.isDelayed(),
allocationStatus);
shard = shard.updateUnassignedInfo(newInfo);
changed = true;
}
}
ignored.add(shard);
return changed;
}
public class UnassignedIterator implements Iterator<ShardRouting> {
@ -685,10 +699,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* will be added back to unassigned once the metadata is constructed again).
* Typically this is used when an allocation decision prevents a shard from being allocated such
* that subsequent consumers of this API won't try to allocate this shard again.
*
* @param attempt the result of the allocation attempt
* @return true iff the decision caused an update to the unassigned info
*/
public void removeAndIgnore() {
public boolean removeAndIgnore(AllocationStatus attempt) {
innerRemove();
ignoreShard(current);
return ignoreShard(current, attempt);
}
private void updateShardRouting(ShardRouting shardRouting) {
@ -721,7 +738,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
/**
* Unsupported operation, just there for the interface. Use {@link #removeAndIgnore()} or
* Unsupported operation, just there for the interface. Use {@link #removeAndIgnore(AllocationStatus)} or
* {@link #initialize(String, String, long)}.
*/
@Override
@ -747,8 +764,8 @@ public class RoutingNodes implements Iterable<RoutingNode> {
/**
* Returns <code>true</code> iff any unassigned shards are marked as temporarily ignored.
* @see UnassignedShards#ignoreShard(ShardRouting)
* @see UnassignedIterator#removeAndIgnore()
* @see UnassignedShards#ignoreShard(ShardRouting, AllocationStatus)
* @see UnassignedIterator#removeAndIgnore(AllocationStatus)
*/
public boolean isIgnoredEmpty() {
return ignored.isEmpty();
@ -878,6 +895,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
assert inactiveShardCount == routingNodes.inactiveShardCount :
"Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]";
assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]";
return true;
}
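Call sites follow the pattern the javadoc describes: decide, then remove-and-ignore with a status derived from the decision. A condensed sketch of that loop; routingNodes, deciders, and allocation are assumed in scope, and the nested type name follows the structure shown above:

import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

boolean changed = false;
RoutingNodes.UnassignedShards.UnassignedIterator it = routingNodes.unassigned().iterator();
while (it.hasNext()) {
    ShardRouting shard = it.next();
    Decision decision = deciders.canAllocate(shard, allocation);
    if (decision.type() == Decision.Type.NO) {
        // record why the shard stays unassigned instead of dropping it silently
        changed |= it.removeAndIgnore(AllocationStatus.fromDecision(decision));
    }
}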

View File

@ -237,10 +237,9 @@ public final class ShardRouting implements Writeable, ToXContent {
return true;
}
// unassigned info is only cleared when a shard moves to started, so
// for unassigned and initializing (we checked for active() before),
// we can safely assume it is there
if (unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
// initializing replica might not have unassignedInfo
assert unassignedInfo != null || (primary == false && state == ShardRoutingState.INITIALIZING);
if (unassignedInfo != null && unassignedInfo.getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
return false;
}

View File

@ -22,6 +22,7 @@ package org.elasticsearch.cluster.routing;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -36,6 +37,8 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
/**
* Holds additional information as to why the shard is in unassigned state.
@ -105,7 +108,94 @@ public final class UnassignedInfo implements ToXContent, Writeable {
/**
* Unassigned as a result of a failed primary while the replica was initializing.
*/
PRIMARY_FAILED;
PRIMARY_FAILED
}
/**
* Captures the status of an unsuccessful allocation attempt for the shard,
* causing it to remain in the unassigned state.
*
* Note: ordering of the enum is important; make sure to add new values
* at the end and handle version serialization properly.
*/
public enum AllocationStatus implements Writeable {
/**
* The shard was denied allocation to a node because the allocation deciders all returned a NO decision
*/
DECIDERS_NO((byte) 0),
/**
* The shard was denied allocation to a node because there were no valid shard copies found for it;
* this can happen on node restart with gateway allocation
*/
NO_VALID_SHARD_COPY((byte) 1),
/**
* The allocation attempt was throttled on the shard by the allocation deciders
*/
DECIDERS_THROTTLED((byte) 2),
/**
* Waiting on getting shard data from all nodes before making a decision about where to allocate the shard
*/
FETCHING_SHARD_DATA((byte) 3),
/**
* Allocation decision has been delayed
*/
DELAYED_ALLOCATION((byte) 4),
/**
* No allocation attempt has been made yet
*/
NO_ATTEMPT((byte) 5);
private final byte id;
AllocationStatus(byte id) {
this.id = id;
}
// package private for testing
byte getId() {
return id;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeByte(id);
}
public static AllocationStatus readFrom(StreamInput in) throws IOException {
byte id = in.readByte();
switch (id) {
case 0:
return DECIDERS_NO;
case 1:
return NO_VALID_SHARD_COPY;
case 2:
return DECIDERS_THROTTLED;
case 3:
return FETCHING_SHARD_DATA;
case 4:
return DELAYED_ALLOCATION;
case 5:
return NO_ATTEMPT;
default:
throw new IllegalArgumentException("Unknown AllocationStatus value [" + id + "]");
}
}
public static AllocationStatus fromDecision(Decision decision) {
Objects.requireNonNull(decision);
switch (decision.type()) {
case NO:
return DECIDERS_NO;
case THROTTLE:
return DECIDERS_THROTTLED;
default:
throw new IllegalArgumentException("no allocation attempt from decision[" + decision.type() + "]");
}
}
public String value() {
return toString().toLowerCase(Locale.ROOT);
}
}
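The id-based wire format round-trips like any other Writeable. A sketch, assuming BytesStreamOutput and the 5.x accessor for turning its bytes back into a StreamInput:

import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

BytesStreamOutput out = new BytesStreamOutput();
AllocationStatus.DECIDERS_THROTTLED.writeTo(out);        // writes the byte id 2
StreamInput in = out.bytes().streamInput();               // assumed accessor
AllocationStatus status = AllocationStatus.readFrom(in);  // DECIDERS_THROTTLED again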
private final Reason reason;
@ -115,6 +205,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
private final String message;
private final Exception failure;
private final int failedAllocations;
private final AllocationStatus lastAllocationStatus; // result of the last allocation attempt for this shard
/**
* creates an UnassignedInfo object based on **current** time
@ -123,7 +214,8 @@ public final class UnassignedInfo implements ToXContent, Writeable {
* @param message more information about cause.
**/
public UnassignedInfo(Reason reason, String message) {
this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false);
this(reason, message, null, reason == Reason.ALLOCATION_FAILED ? 1 : 0, System.nanoTime(), System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT);
}
/**
@ -133,16 +225,18 @@ public final class UnassignedInfo implements ToXContent, Writeable {
* @param unassignedTimeNanos the time to use as the base for any delayed re-assignment calculation
* @param unassignedTimeMillis the time of unassignment used to display to in our reporting.
* @param delayed if allocation of this shard is delayed due to INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.
* @param lastAllocationStatus the result of the last allocation attempt for this shard
*/
public UnassignedInfo(Reason reason, @Nullable String message, @Nullable Exception failure, int failedAllocations,
long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed) {
this.reason = reason;
long unassignedTimeNanos, long unassignedTimeMillis, boolean delayed, AllocationStatus lastAllocationStatus) {
this.reason = Objects.requireNonNull(reason);
this.unassignedTimeMillis = unassignedTimeMillis;
this.unassignedTimeNanos = unassignedTimeNanos;
this.delayed = delayed;
this.message = message;
this.failure = failure;
this.failedAllocations = failedAllocations;
this.lastAllocationStatus = Objects.requireNonNull(lastAllocationStatus);
assert (failedAllocations > 0) == (reason == Reason.ALLOCATION_FAILED) :
"failedAllocations: " + failedAllocations + " for reason " + reason;
assert !(message == null && failure != null) : "provide a message if a failure exception is provided";
@ -159,6 +253,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
this.message = in.readOptionalString();
this.failure = in.readException();
this.failedAllocations = in.readVInt();
this.lastAllocationStatus = AllocationStatus.readFrom(in);
}
public void writeTo(StreamOutput out) throws IOException {
@ -169,6 +264,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
out.writeOptionalString(message);
out.writeException(failure);
out.writeVInt(failedAllocations);
lastAllocationStatus.writeTo(out);
}
public UnassignedInfo readFrom(StreamInput in) throws IOException {
@ -240,6 +336,13 @@ public final class UnassignedInfo implements ToXContent, Writeable {
return message + (failure == null ? "" : ", failure " + ExceptionsHelper.detailedMessage(failure));
}
/**
* Get the status for the last allocation attempt for this shard.
*/
public AllocationStatus getLastAllocationStatus() {
return lastAllocationStatus;
}
/**
* Calculates the delay left based on current time (in nanoseconds) and the delay defined by the index settings.
* Only relevant if shard is effectively delayed (see {@link #isDelayed()})
@ -302,6 +405,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
if (details != null) {
sb.append(", details[").append(details).append("]");
}
sb.append(", allocation_status[").append(lastAllocationStatus.value()).append("]");
return sb.toString();
}
@ -323,6 +427,7 @@ public final class UnassignedInfo implements ToXContent, Writeable {
if (details != null) {
builder.field("details", details);
}
builder.field("allocation_status", lastAllocationStatus.value());
builder.endObject();
return builder;
}
@ -353,17 +458,22 @@ public final class UnassignedInfo implements ToXContent, Writeable {
if (message != null ? !message.equals(that.message) : that.message != null) {
return false;
}
if (lastAllocationStatus != that.lastAllocationStatus) {
return false;
}
return !(failure != null ? !failure.equals(that.failure) : that.failure != null);
}
@Override
public int hashCode() {
int result = reason != null ? reason.hashCode() : 0;
int result = reason.hashCode();
result = 31 * result + Boolean.hashCode(delayed);
result = 31 * result + Integer.hashCode(failedAllocations);
result = 31 * result + Long.hashCode(unassignedTimeMillis);
result = 31 * result + (message != null ? message.hashCode() : 0);
result = 31 * result + (failure != null ? failure.hashCode() : 0);
result = 31 * result + lastAllocationStatus.hashCode();
return result;
}
}

View File

@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -232,7 +233,7 @@ public class AllocationService extends AbstractComponent {
UnassignedInfo unassignedInfo = failedShard.shard.unassignedInfo();
final int failedAllocations = unassignedInfo != null ? unassignedInfo.getNumFailedAllocations() : 0;
changed |= applyFailedShard(allocation, failedShard.shard, true, new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, failedShard.message, failedShard.failure,
failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false));
failedAllocations + 1, currentNanoTime, System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT));
}
if (!changed) {
return new RoutingAllocation.Result(false, clusterState.routingTable(), clusterState.metaData());
@ -259,7 +260,8 @@ public class AllocationService extends AbstractComponent {
if (newComputedLeftDelayNanos == 0) {
changed = true;
unassignedIterator.updateUnassignedInfo(new UnassignedInfo(unassignedInfo.getReason(), unassignedInfo.getMessage(), unassignedInfo.getFailure(),
unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false));
unassignedInfo.getNumFailedAllocations(), unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), false,
unassignedInfo.getLastAllocationStatus()));
}
}
}
@ -417,7 +419,7 @@ public class AllocationService extends AbstractComponent {
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0;
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed);
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT);
applyFailedShard(allocation, shardRouting, false, unassignedInfo);
}
// its a dead node, remove it, note, its important to remove it *after* we apply failed shard
@ -438,7 +440,8 @@ public class AllocationService extends AbstractComponent {
for (ShardRouting routing : replicas) {
changed |= applyFailedShard(allocation, routing, false,
new UnassignedInfo(UnassignedInfo.Reason.PRIMARY_FAILED, "primary failed while replica initializing",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false));
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false,
AllocationStatus.NO_ATTEMPT));
}
return changed;
}

View File

@ -28,6 +28,7 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@ -647,11 +648,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
for (int i = 0; i < primaryLength; i++) {
ShardRouting shard = primary[i];
if (!shard.primary()) {
boolean drop = deciders.canAllocate(shard, allocation).type() == Type.NO;
if (drop) {
unassigned.ignoreShard(shard);
final Decision decision = deciders.canAllocate(shard, allocation);
if (decision.type() == Type.NO) {
UnassignedInfo.AllocationStatus allocationStatus = UnassignedInfo.AllocationStatus.fromDecision(decision);
changed |= unassigned.ignoreShard(shard, allocationStatus);
while(i < primaryLength-1 && comparator.compare(primary[i], primary[i+1]) == 0) {
unassigned.ignoreShard(primary[++i]);
changed |= unassigned.ignoreShard(primary[++i], allocationStatus);
}
continue;
} else {
@ -701,9 +703,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
final int minNodeHigh = minNode.highestPrimary(shard.getIndexName());
if ((((nodeHigh > repId && minNodeHigh > repId) || (nodeHigh < repId && minNodeHigh < repId)) && (nodeHigh < minNodeHigh))
|| (nodeHigh > minNodeHigh && nodeHigh > repId && minNodeHigh < repId)) {
minNode = node;
minWeight = currentWeight;
decision = currentDecision;
// nothing to set here; the minNode, minWeight, and decision get set below
} else {
break NOUPDATE;
}
@ -719,7 +719,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
}
}
}
assert decision != null && minNode != null || decision == null && minNode == null;
assert (decision == null) == (minNode == null);
if (minNode != null) {
final long shardSize = DiskThresholdDecider.getExpectedShardSize(shard, allocation,
ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
@ -735,10 +735,12 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
} else {
minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize));
final RoutingNode node = minNode.getRoutingNode();
if (deciders.canAllocate(node, allocation).type() != Type.YES) {
final Decision.Type nodeLevelDecision = deciders.canAllocate(node, allocation).type();
if (nodeLevelDecision != Type.YES) {
if (logger.isTraceEnabled()) {
logger.trace("Can not allocate on node [{}] remove from round decision [{}]", node, decision.type());
}
assert nodeLevelDecision == Type.NO;
throttledNodes.add(minNode);
}
}
@ -748,10 +750,14 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
} else if (logger.isTraceEnabled()) {
logger.trace("No Node found to assign shard [{}]", shard);
}
unassigned.ignoreShard(shard);
assert decision == null || decision.type() == Type.THROTTLE;
UnassignedInfo.AllocationStatus allocationStatus =
decision == null ? UnassignedInfo.AllocationStatus.DECIDERS_NO :
UnassignedInfo.AllocationStatus.fromDecision(decision);
changed |= unassigned.ignoreShard(shard, allocationStatus);
if (!shard.primary()) { // we could not allocate it and we are a replica - check if we can ignore the other replicas
while(secondaryLength > 0 && comparator.compare(shard, secondary[secondaryLength-1]) == 0) {
unassigned.ignoreShard(secondary[--secondaryLength]);
changed |= unassigned.ignoreShard(secondary[--secondaryLength], allocationStatus);
}
}
}

View File

@ -39,8 +39,6 @@ import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
import java.util.Objects;
import java.util.function.Consumer;
import java.util.function.Function;
/**
* Abstract base class for allocating an unassigned shard to a node

View File

@ -125,7 +125,8 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation
// we need to move the unassigned info back to treat it as if it was index creation
unassignedInfoToUpdate = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED,
"force empty allocation from previous reason " + shardRouting.unassignedInfo().getReason() + ", " + shardRouting.unassignedInfo().getMessage(),
shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false);
shardRouting.unassignedInfo().getFailure(), 0, System.nanoTime(), System.currentTimeMillis(), false,
shardRouting.unassignedInfo().getLastAllocationStatus());
}
initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, unassignedInfoToUpdate);

View File

@ -19,6 +19,7 @@
package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
@ -29,11 +30,12 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
/**
* This abstract class defines the basic {@link Decision} used during the shard
* allocation process.
*
* @see AllocationDecider
*/
public abstract class Decision implements ToXContent {
@ -44,7 +46,7 @@ public abstract class Decision implements ToXContent {
public static final Decision THROTTLE = new Single(Type.THROTTLE);
/**
* Creates a simple decision
* @param type {@link Type} of the decision
* @param label label for the Decider that produced this decision
* @param explanation explanation of the decision
@ -95,10 +97,10 @@ public abstract class Decision implements ToXContent {
}
/**
* This enumeration defines the
* possible types of decisions
*/
public static enum Type {
public enum Type {
YES,
NO,
THROTTLE;
@ -144,6 +146,7 @@ public abstract class Decision implements ToXContent {
*/
public abstract Type type();
@Nullable
public abstract String label();
/**
@ -166,7 +169,7 @@ public abstract class Decision implements ToXContent {
}
/**
* Creates a new {@link Single} decision of a given type
* @param type {@link Type} of the decision
*/
public Single(Type type) {
@ -175,12 +178,12 @@ public abstract class Decision implements ToXContent {
/**
* Creates a new {@link Single} decision of a given type
*
* @param type {@link Type} of the decision
* @param explanation An explanation of this {@link Decision}
* @param explanationParams A set of additional parameters
*/
public Single(Type type, String label, String explanation, Object... explanationParams) {
public Single(Type type, @Nullable String label, @Nullable String explanation, @Nullable Object... explanationParams) {
this.type = type;
this.label = label;
this.explanation = explanation;
@ -193,6 +196,7 @@ public abstract class Decision implements ToXContent {
}
@Override
@Nullable
public String label() {
return this.label;
}
@ -205,6 +209,7 @@ public abstract class Decision implements ToXContent {
/**
* Returns the explanation string, fully formatted. Only formats the string once
*/
@Nullable
public String getExplanation() {
if (explanationString == null && explanation != null) {
explanationString = String.format(Locale.ROOT, explanation, explanationParams);
@ -224,15 +229,16 @@ public abstract class Decision implements ToXContent {
Decision.Single s = (Decision.Single) object;
return this.type == s.type &&
this.label.equals(s.label) &&
this.getExplanation().equals(s.getExplanation());
Objects.equals(label, s.label) &&
Objects.equals(getExplanation(), s.getExplanation());
}
@Override
public int hashCode() {
int result = this.type.hashCode();
result = 31 * result + this.label.hashCode();
result = 31 * result + this.getExplanation().hashCode();
int result = type.hashCode();
result = 31 * result + (label == null ? 0 : label.hashCode());
String explanationStr = getExplanation();
result = 31 * result + (explanationStr == null ? 0 : explanationStr.hashCode());
return result;
}
@ -288,6 +294,7 @@ public abstract class Decision implements ToXContent {
}
@Override
@Nullable
public String label() {
// Multi decisions have no labels
return null;
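
The equals/hashCode rewrite above exists because label and explanation may legitimately be null now that the Single constructor accepts @Nullable arguments; Objects.equals plus a null-guarded hash is the standard null-safe pattern. A minimal standalone sketch of the same pattern (class name hypothetical):

import java.util.Objects;

final class Labeled {
    private final String label; // may be null, like Decision.Single's label

    Labeled(String label) {
        this.label = label;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        return Objects.equals(label, ((Labeled) o).label); // null-safe, unlike label.equals(...)
    }

    @Override
    public int hashCode() {
        return 31 + (label == null ? 0 : label.hashCode()); // same guard as the diff above
    }
}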

View File

@ -21,6 +21,7 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
@ -177,7 +178,12 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
if (shardRouting.unassigned()) {
initializingShard = shardRouting.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (shardRouting.initializing()) {
initializingShard = shardRouting.moveToUnassigned(shardRouting.unassignedInfo())
UnassignedInfo unassignedInfo = shardRouting.unassignedInfo();
if (unassignedInfo == null) {
// unassigned shards must have unassignedInfo (initializing shards might not)
unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.ALLOCATION_FAILED, "fake");
}
initializingShard = shardRouting.moveToUnassigned(unassignedInfo)
.initialize(currentNodeId, null, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE);
} else if (shardRouting.relocating()) {
initializingShard = shardRouting.cancelRelocation()

View File

@ -84,9 +84,6 @@ import java.util.stream.Collectors;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
/**
*
*/
public class ClusterService extends AbstractLifecycleComponent {
public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =
@ -348,6 +345,7 @@ public class ClusterService extends AbstractLifecycleComponent {
* @param source the source of the cluster state update task
* @param updateTask the full context for the cluster state update
* task
*
*/
public void submitStateUpdateTask(final String source, final ClusterStateUpdateTask updateTask) {
submitStateUpdateTask(source, updateTask, updateTask, updateTask, updateTask);
@ -371,6 +369,7 @@ public class ClusterService extends AbstractLifecycleComponent {
* @param listener callback after the cluster state update task
* completes
* @param <T> the type of the cluster state update task state
*
*/
public <T> void submitStateUpdateTask(final String source, final T task,
final ClusterStateTaskConfig config,
@ -390,6 +389,7 @@ public class ClusterService extends AbstractLifecycleComponent {
* that share the same executor will be executed
* batches on this executor
* @param <T> the type of the cluster state update task state
*
*/
public <T> void submitStateUpdateTasks(final String source,
final Map<T, ClusterStateTaskListener> tasks, final ClusterStateTaskConfig config,
@ -411,7 +411,7 @@ public class ClusterService extends AbstractLifecycleComponent {
List<UpdateTask> existingTasks = updateTasksPerExecutor.computeIfAbsent(executor, k -> new ArrayList<>());
for (@SuppressWarnings("unchecked") UpdateTask<T> existing : existingTasks) {
if (tasksIdentity.containsKey(existing.task)) {
throw new IllegalArgumentException("task [" + existing.task + "] is already queued");
throw new IllegalStateException("task [" + existing.task + "] with source [" + source + "] is already queued");
}
}
existingTasks.addAll(updateTasks);

View File

@ -16,6 +16,7 @@
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import org.elasticsearch.common.io.stream.StreamInput;
@ -23,94 +24,48 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
*
*/
public final class Priority implements Comparable<Priority> {
public enum Priority {
IMMEDIATE((byte) 0),
URGENT((byte) 1),
HIGH((byte) 2),
NORMAL((byte) 3),
LOW((byte) 4),
LANGUID((byte) 5);
public static Priority readFrom(StreamInput input) throws IOException {
return fromByte(input.readByte());
}
public static void writeTo(Priority priority, StreamOutput output) throws IOException {
byte b = priority.value;
output.writeByte(b);
output.writeByte(priority.value);
}
public static Priority fromByte(byte b) {
switch (b) {
case -1: return IMMEDIATE;
case 0: return URGENT;
case 1: return HIGH;
case 2: return NORMAL;
case 3: return LOW;
case 4: return LANGUID;
case 0: return IMMEDIATE;
case 1: return URGENT;
case 2: return HIGH;
case 3: return NORMAL;
case 4: return LOW;
case 5: return LANGUID;
default:
throw new IllegalArgumentException("can't find priority for [" + b + "]");
}
}
public static final Priority IMMEDIATE = new Priority((byte) -1);
public static final Priority URGENT = new Priority((byte) 0);
public static final Priority HIGH = new Priority((byte) 1);
public static final Priority NORMAL = new Priority((byte) 2);
public static final Priority LOW = new Priority((byte) 3);
public static final Priority LANGUID = new Priority((byte) 4);
private static final Priority[] values = new Priority[] { IMMEDIATE, URGENT, HIGH, NORMAL, LOW, LANGUID };
private final byte value;
private Priority(byte value) {
Priority(byte value) {
this.value = value;
}
/**
* @return an array of all available priorities, sorted from the highest to the lowest.
*/
public static Priority[] values() {
return values;
}
@Override
public int compareTo(Priority p) {
return (this.value < p.value) ? -1 : ((this.value > p.value) ? 1 : 0);
}
public boolean after(Priority p) {
return value > p.value;
return this.compareTo(p) > 0;
}
public boolean sameOrAfter(Priority p) {
return value >= p.value;
return this.compareTo(p) >= 0;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || Priority.class != o.getClass()) return false;
Priority priority = (Priority) o;
if (value != priority.value) return false;
return true;
}
@Override
public int hashCode() {
return (int) value;
}
@Override
public String toString() {
switch (value) {
case (byte) -1: return "IMMEDIATE";
case (byte) 0: return "URGENT";
case (byte) 1: return "HIGH";
case (byte) 2: return "NORMAL";
case (byte) 3: return "LOW";
default:
return "LANGUID";
}
}
}
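
Because the enum constants are declared from IMMEDIATE down to LANGUID, Enum's final ordinal-based compareTo gives the same ordering the old byte comparison did, and values() now comes for free in highest-to-lowest order. A minimal sketch exercising the new semantics (class name hypothetical; run with -ea):

import org.elasticsearch.common.Priority;

public class PriorityOrderDemo {
    public static void main(String[] args) {
        // the wire byte maps straight onto the constants
        assert Priority.fromByte((byte) 2) == Priority.HIGH;
        // declaration order doubles as priority order, so after()/sameOrAfter()
        // can delegate to Enum.compareTo()
        assert Priority.LANGUID.after(Priority.NORMAL);
        assert Priority.URGENT.sameOrAfter(Priority.IMMEDIATE);
        assert Priority.IMMEDIATE.compareTo(Priority.LOW) < 0;
    }
}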

View File

@ -99,35 +99,6 @@ public interface BlobContainer {
*/
void deleteBlob(String blobName) throws IOException;
/**
* Deletes blobs with the given names. If any subset of the names do not exist in the container, this method has no
* effect for those names, and will delete the blobs for those names that do exist. If any of the blobs failed
* to delete, those blobs that were processed before it and successfully deleted will remain deleted. An exception
* is thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
* all the failed deletes into a single IOException instead?)
*
* TODO: remove, see https://github.com/elastic/elasticsearch/issues/18529
*
* @param blobNames
* The collection of blob names to delete from the container.
* @throws IOException if any of the blobs in the collection exists but could not be deleted.
*/
void deleteBlobs(Collection<String> blobNames) throws IOException;
/**
* Deletes all blobs in the container that match the specified prefix. If any of the blobs failed to delete,
* those blobs that were processed before it and successfully deleted will remain deleted. An exception is
* thrown at the first blob entry that fails to delete (TODO: is this the right behavior? Should we collect
* all the failed deletes into a single IOException instead?)
*
* TODO: remove, see: https://github.com/elastic/elasticsearch/issues/18529
*
* @param blobNamePrefix
* The prefix to match against blob names in the container. Any blob whose name has the prefix will be deleted.
* @throws IOException if any of the matching blobs failed to delete.
*/
void deleteBlobsByPrefix(String blobNamePrefix) throws IOException;
/**
* Lists all blobs in the container.
*

View File

@ -45,21 +45,6 @@ public abstract class AbstractBlobContainer implements BlobContainer {
return this.path;
}
@Override
public void deleteBlobsByPrefix(final String blobNamePrefix) throws IOException {
Map<String, BlobMetaData> blobs = listBlobsByPrefix(blobNamePrefix);
for (BlobMetaData blob : blobs.values()) {
deleteBlob(blob.name());
}
}
@Override
public void deleteBlobs(Collection<String> blobNames) throws IOException {
for (String blob: blobNames) {
deleteBlob(blob);
}
}
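
With the bulk methods removed from the interface (see the TODOs and issue 18529 above), callers loop over deleteBlob themselves. A minimal caller-side sketch, assuming a BlobContainer named container and the names/prefix variables are in scope:

// same semantics as the removed defaults: the first failing delete
// throws IOException and leaves earlier deletes in place
for (String blobName : blobNames) {
    container.deleteBlob(blobName);
}
// prefix variant, rebuilt from listBlobsByPrefix
for (BlobMetaData blob : container.listBlobsByPrefix(blobNamePrefix).values()) {
    container.deleteBlob(blob.name());
}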
@Override
public void writeBlob(String blobName, BytesReference bytes) throws IOException {
try (InputStream stream = bytes.streamInput()) {

View File

@ -19,11 +19,9 @@
package org.elasticsearch.common.compress;
import org.apache.lucene.store.IndexInput;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.jboss.netty.buffer.ChannelBuffer;
import java.io.IOException;

View File

@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.Collection;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.util.BitUtil;
/**
@ -176,6 +177,26 @@ public class GeoHashUtils {
return BASE_32[((x & 1) + ((y & 1) * 2) + ((x & 2) * 2) + ((y & 2) * 4) + ((x & 4) * 4)) % 32];
}
/**
* Computes the bounding box coordinates from a given geohash
*
* @param geohash Geohash of the defined cell
* @return GeoRect rectangle defining the bounding box
*/
public static Rectangle bbox(final String geohash) {
// bottom left is the coordinate
GeoPoint bottomLeft = GeoPoint.fromGeohash(geohash);
long ghLong = longEncode(geohash);
// shift away the level
ghLong >>>= 4;
// deinterleave and add 1 to lat and lon to get topRight
long lat = BitUtil.deinterleave(ghLong >>> 1) + 1;
long lon = BitUtil.deinterleave(ghLong) + 1;
GeoPoint topRight = GeoPoint.fromGeohash(BitUtil.interleave((int)lon, (int)lat) << 4 | geohash.length());
return new Rectangle(bottomLeft.lat(), topRight.lat(), bottomLeft.lon(), topRight.lon());
}
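
The method leans on the interleaved encoding: a geohash long carries the level in its lowest 4 bits and alternating lon/lat bits above them, so dropping the level, de-interleaving, adding one cell on each axis, and re-interleaving yields the neighbor whose bottom-left corner is this cell's top-right. A minimal usage sketch (the geohash literal is only an example):

Rectangle cell = GeoHashUtils.bbox("u09t");
// bottom-left comes from the geohash itself, top-right from the +1/+1 neighbor
System.out.println("lat " + cell.minLat + ".." + cell.maxLat
        + ", lon " + cell.minLon + ".." + cell.maxLon);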
/**
* Calculate all neighbors of a given geohash cell.
*

View File

@ -20,8 +20,6 @@
package org.elasticsearch.common.network;
import org.elasticsearch.action.support.replication.ReplicationTask;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand;
@ -41,13 +39,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.tasks.RawTaskStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
/**
* A module to handle registering and binding all network related classes.
@ -58,8 +54,9 @@ public class NetworkModule extends AbstractModule {
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String LOCAL_TRANSPORT = "local";
public static final String NETTY_TRANSPORT = "netty";
public static final Setting<String> TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString("transport.type.default", Property.NodeScope);
public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString("http.type.default", Property.NodeScope);
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
@ -89,16 +86,11 @@ public class NetworkModule extends AbstractModule {
this.settings = settings;
this.transportClient = transportClient;
this.namedWriteableRegistry = namedWriteableRegistry;
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransportService("default", TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
registerTaskStatus(ReplicationTask.Status.NAME, ReplicationTask.Status::new);
registerTaskStatus(RawTaskStatus.NAME, RawTaskStatus::new);
registerBuiltinAllocationCommands();
if (transportClient == false) {
registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
}
}
public boolean isTransportClient() {
@ -155,15 +147,13 @@ public class NetworkModule extends AbstractModule {
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
bind(NamedWriteableRegistry.class).toInstance(namedWriteableRegistry);
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
String defaultTransport = DiscoveryNode.isLocalNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport);
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
if (transportClient == false) {
if (HTTP_ENABLED.get(settings)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_SETTING.getKey(), NETTY_TRANSPORT);
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_SETTING.getKey(), HTTP_DEFAULT_TYPE_SETTING.get(settings));
} else {
bind(HttpServer.class).toProvider(Providers.of(null));
}
@ -185,4 +175,8 @@ public class NetworkModule extends AbstractModule {
AllocateStalePrimaryAllocationCommand.COMMAND_NAME_FIELD);
}
public boolean canRegisterHttpExtensions() {
return transportClient == false;
}
}
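
With netty no longer registered here, the only built-in transport is the local one; concrete transports now arrive as plugins (note the transport-netty3-client substitution in the build file) and publish themselves through transport.type.default / http.type.default. A minimal sketch of the lookup order that bindType now encodes, assuming a Settings instance named settings (this mirrors the resolution logic, not the actual Guice wiring):

// explicit transport.type wins; otherwise fall back to whatever a
// transport plugin registered under transport.type.default
String transportType = settings.get(NetworkModule.TRANSPORT_TYPE_KEY,
        NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));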

View File

@ -61,7 +61,6 @@ import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.IndexingMemoryController;
@ -92,7 +91,6 @@ import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.transport.netty.NettyTransport;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherService;
@ -216,6 +214,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
GatewayService.RECOVER_AFTER_NODES_SETTING,
GatewayService.RECOVER_AFTER_TIME_SETTING,
NetworkModule.HTTP_ENABLED,
NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
NetworkModule.HTTP_TYPE_SETTING,
NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
NetworkModule.TRANSPORT_TYPE_SETTING,
@ -241,18 +241,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,
NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyHttpServerTransport.SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE,
NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MIN,
NettyHttpServerTransport.SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_MAX,
NettyHttpServerTransport.SETTING_HTTP_WORKER_COUNT,
NettyHttpServerTransport.SETTING_HTTP_TCP_NO_DELAY,
NettyHttpServerTransport.SETTING_HTTP_TCP_KEEP_ALIVE,
NettyHttpServerTransport.SETTING_HTTP_TCP_BLOCKING_SERVER,
NettyHttpServerTransport.SETTING_HTTP_TCP_REUSE_ADDRESS,
NettyHttpServerTransport.SETTING_HTTP_TCP_SEND_BUFFER_SIZE,
NettyHttpServerTransport.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
@ -278,7 +266,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
TransportSettings.BIND_HOST,
TransportSettings.PUBLISH_PORT,
TransportSettings.PORT,
NettyTransport.WORKER_COUNT,
TcpTransport.CONNECTIONS_PER_NODE_RECOVERY,
TcpTransport.CONNECTIONS_PER_NODE_BULK,
TcpTransport.CONNECTIONS_PER_NODE_REG,
@ -287,13 +274,7 @@ public final class ClusterSettings extends AbstractScopedSettings {
TcpTransport.PING_SCHEDULE,
TcpTransport.TCP_BLOCKING_CLIENT,
TcpTransport.TCP_CONNECT_TIMEOUT,
NettyTransport.NETTY_MAX_CUMULATION_BUFFER_CAPACITY,
NettyTransport.NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS,
NettyTransport.NETTY_RECEIVE_PREDICTOR_SIZE,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MIN,
NettyTransport.NETTY_RECEIVE_PREDICTOR_MAX,
NetworkService.NETWORK_SERVER,
NettyTransport.NETTY_BOSS_COUNT,
TcpTransport.TCP_NO_DELAY,
TcpTransport.TCP_KEEP_ALIVE,
TcpTransport.TCP_REUSE_ADDRESS,
@ -360,8 +341,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
Node.NODE_NAME_SETTING,
Node.NODE_DATA_SETTING,
Node.NODE_MASTER_SETTING,
Node.NODE_LOCAL_SETTING,
Node.NODE_MODE_SETTING,
Node.NODE_INGEST_SETTING,
Node.NODE_ATTRIBUTES,
Node.NODE_LOCAL_STORAGE_SETTING,

View File

@ -1,88 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings.loader;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.FastStringReader;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.Closeable;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;
import java.util.function.Supplier;
/**
* Settings loader that loads (parses) the settings in a properties format.
*/
public class PropertiesSettingsLoader implements SettingsLoader {
@Override
public Map<String, String> load(String source) throws IOException {
return load(() -> new FastStringReader(source), (reader, props) -> props.load(reader));
}
@Override
public Map<String, String> load(byte[] source) throws IOException {
return load(() -> StreamInput.wrap(source), (inStream, props) -> props.load(inStream));
}
private <T extends Closeable> Map<String, String> load(
Supplier<T> supplier,
IOExceptionThrowingBiConsumer<T, Properties> properties
) throws IOException {
T t = null;
try {
t = supplier.get();
final Properties props = new NoDuplicatesProperties();
properties.accept(t, props);
final Map<String, String> result = new HashMap<>();
for (Map.Entry entry : props.entrySet()) {
result.put((String) entry.getKey(), (String) entry.getValue());
}
return result;
} finally {
IOUtils.closeWhileHandlingException(t);
}
}
@FunctionalInterface
private interface IOExceptionThrowingBiConsumer<T, U> {
void accept(T t, U u) throws IOException;
}
class NoDuplicatesProperties extends Properties {
@Override
public synchronized Object put(Object key, Object value) {
final Object previousValue = super.put(key, value);
if (previousValue != null) {
throw new ElasticsearchParseException(
"duplicate settings key [{}] found, previous value [{}], current value [{}]",
key,
previousValue,
value
);
}
return previousValue;
}
}
}

View File

@ -48,11 +48,8 @@ public final class SettingsLoaderFactory {
return new JsonSettingsLoader(false);
} else if (resourceName.endsWith(".yml") || resourceName.endsWith(".yaml")) {
return new YamlSettingsLoader(false);
} else if (resourceName.endsWith(".properties")) {
return new PropertiesSettingsLoader();
} else {
// lets default to the json one
return new JsonSettingsLoader(false);
throw new IllegalArgumentException("unable to detect content type from resource name [" + resourceName + "]");
}
}
@ -72,11 +69,11 @@ public final class SettingsLoaderFactory {
public static SettingsLoader loaderFromSource(String source) {
if (source.indexOf('{') != -1 && source.indexOf('}') != -1) {
return new JsonSettingsLoader(true);
}
if (source.indexOf(':') != -1) {
} else if (source.indexOf(':') != -1) {
return new YamlSettingsLoader(true);
} else {
throw new IllegalArgumentException("unable to detect content type from source [" + source + "]");
}
return new PropertiesSettingsLoader();
}
}
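
Both factory methods now fail fast instead of guessing: an unknown file extension no longer silently falls back to JSON, and a source that looks like neither JSON nor YAML no longer falls back to the removed properties loader. Illustrative calls (inputs are examples):

SettingsLoaderFactory.loaderFromResource("elasticsearch.yml");        // YamlSettingsLoader
SettingsLoaderFactory.loaderFromSource("{\"cluster.name\": \"x\"}");  // JSON: braces detected
SettingsLoaderFactory.loaderFromSource("cluster.name: x");            // YAML: colon detected
SettingsLoaderFactory.loaderFromResource("legacy.properties");        // now IllegalArgumentException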

View File

@ -39,8 +39,8 @@ public class NetworkExceptionHelper {
return true;
}
if (e.getMessage() != null) {
// UGLY!, this exception message seems to represent closed connection
if (e.getMessage().contains("Connection reset by peer")) {
if (e.getMessage().contains("Connection reset")) {
return true;
}
if (e.getMessage().contains("connection was aborted")) {

View File

@ -72,7 +72,7 @@ public abstract class ExtensionPoint {
*/
public static class ClassMap<T> extends ExtensionPoint {
protected final Class<T> extensionClass;
private final Map<String, Class<? extends T>> extensions = new HashMap<>();
protected final Map<String, Class<? extends T>> extensions = new HashMap<>();
private final Set<String> reservedKeys;
/**
@ -147,7 +147,8 @@ public abstract class ExtensionPoint {
}
final Class<? extends T> instance = getExtension(type);
if (instance == null) {
throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "]");
throw new IllegalArgumentException("Unknown [" + this.name + "] type [" + type + "] possible values: "
+ extensions.keySet());
}
if (extensionClass == instance) {
binder.bind(extensionClass).asEagerSingleton();

View File

@ -47,7 +47,7 @@ import java.util.function.Function;
public class DiscoveryModule extends AbstractModule {
public static final Setting<String> DISCOVERY_TYPE_SETTING =
new Setting<>("discovery.type", settings -> DiscoveryNode.isLocalNode(settings) ? "local" : "zen", Function.identity(),
new Setting<>("discovery.type", "zen", Function.identity(),
Property.NodeScope);
public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING =
new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope);

View File

@ -25,6 +25,9 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlocks;
@ -79,13 +82,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.BiFunction;
import java.util.stream.Collectors;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
/**
*
*/
public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider {
public static final Setting<TimeValue> PING_TIMEOUT_SETTING =
@ -148,6 +149,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
// must initialized in doStart(), when we have the allocationService set
private volatile NodeJoinController nodeJoinController;
private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
@Inject
public ZenDiscovery(Settings settings, ThreadPool threadPool,
@ -216,6 +218,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
joinThreadControl.start();
pingService.start();
this.nodeJoinController = new NodeJoinController(clusterService, allocationService, electMaster, discoverySettings, settings);
this.nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, electMaster, this::rejoin, logger);
}
@Override
@ -500,43 +503,119 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
}
}
// visible for testing
static class NodeRemovalClusterStateTaskExecutor implements ClusterStateTaskExecutor<NodeRemovalClusterStateTaskExecutor.Task>, ClusterStateTaskListener {
private final AllocationService allocationService;
private final ElectMasterService electMasterService;
private final BiFunction<ClusterState, String, ClusterState> rejoin;
private final ESLogger logger;
static class Task {
private final DiscoveryNode node;
private final String reason;
public Task(final DiscoveryNode node, final String reason) {
this.node = node;
this.reason = reason;
}
public DiscoveryNode node() {
return node;
}
public String reason() {
return reason;
}
@Override
public String toString() {
return node + " " + reason;
}
}
NodeRemovalClusterStateTaskExecutor(
final AllocationService allocationService,
final ElectMasterService electMasterService,
final BiFunction<ClusterState, String, ClusterState> rejoin,
final ESLogger logger) {
this.allocationService = allocationService;
this.electMasterService = electMasterService;
this.rejoin = rejoin;
this.logger = logger;
}
@Override
public BatchResult<Task> execute(final ClusterState currentState, final List<Task> tasks) throws Exception {
final DiscoveryNodes.Builder remainingNodesBuilder = DiscoveryNodes.builder(currentState.nodes());
boolean removed = false;
for (final Task task : tasks) {
if (currentState.nodes().nodeExists(task.node())) {
remainingNodesBuilder.remove(task.node());
removed = true;
} else {
logger.debug("node [{}] does not exist in cluster state, ignoring", task);
}
}
if (!removed) {
// no nodes to remove, keep the current cluster state
return BatchResult.<Task>builder().successes(tasks).build(currentState);
}
final ClusterState remainingNodesClusterState = remainingNodesClusterState(currentState, remainingNodesBuilder);
final BatchResult.Builder<Task> resultBuilder = BatchResult.<Task>builder().successes(tasks);
if (!electMasterService.hasEnoughMasterNodes(remainingNodesClusterState.nodes())) {
return resultBuilder.build(rejoin.apply(remainingNodesClusterState, "not enough master nodes"));
} else {
final RoutingAllocation.Result routingResult = allocationService.reroute(remainingNodesClusterState, describeTasks(tasks));
return resultBuilder.build(ClusterState.builder(remainingNodesClusterState).routingResult(routingResult).build());
}
}
// visible for testing
// hook is used in testing to ensure that correct cluster state is used to test whether a
// rejoin or reroute is needed
ClusterState remainingNodesClusterState(final ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder) {
return ClusterState.builder(currentState).nodes(remainingNodesBuilder).build();
}
@Override
public void onFailure(final String source, final Exception e) {
logger.error("unexpected failure during [{}]", e, source);
}
@Override
public void onNoLongerMaster(String source) {
logger.debug("no longer master while processing node removal [{}]", source);
}
}
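
Node failures and node leaves now funnel through this one executor, so a burst of departures collapses into a single cluster-state update: one master-quorum check and one reroute for the whole batch, with describeTasks(tasks) naming every removed node in the reroute reason. A minimal sketch of the effect (nodeA and nodeB are hypothetical DiscoveryNode references; the calls mirror removeNode below):

// both tasks share the same executor instance, so the cluster service may
// hand them to execute(...) together in one List<Task>
removeNode(nodeA, "zen-disco-node-failed", "failed to ping");
removeNode(nodeB, "zen-disco-node-left", "left");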
private void removeNode(final DiscoveryNode node, final String source, final String reason) {
clusterService.submitStateUpdateTask(
source + "(" + node + "), reason(" + reason + ")",
new NodeRemovalClusterStateTaskExecutor.Task(node, reason),
ClusterStateTaskConfig.build(Priority.IMMEDIATE),
nodeRemovalExecutor,
nodeRemovalExecutor);
}
private void handleLeaveRequest(final DiscoveryNode node) {
if (lifecycleState() != Lifecycle.State.STARTED) {
// not started, ignore a node failure
return;
}
if (localNodeMaster()) {
clusterService.submitStateUpdateTask("zen-disco-node-left(" + node + ")", new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node.getId());
currentState = ClusterState.builder(currentState).nodes(builder).build();
// check if we have enough master nodes, if not, we need to move into joining the cluster again
if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
return rejoin(currentState, "not enough master nodes");
}
// eagerly run reroute to remove dead nodes from routing table
RoutingAllocation.Result routingResult = allocationService.reroute(ClusterState.builder(currentState).build(),
"[" + node + "] left");
return ClusterState.builder(currentState).routingResult(routingResult).build();
}
@Override
public void onNoLongerMaster(String source) {
// ignoring (already logged)
}
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
}
});
removeNode(node, "zen-disco-node-left", "left");
} else if (node.equals(nodes().getMasterNode())) {
handleMasterGone(node, null, "shut_down");
}
}
private void handleNodeFailure(final DiscoveryNode node, String reason) {
private void handleNodeFailure(final DiscoveryNode node, final String reason) {
if (lifecycleState() != Lifecycle.State.STARTED) {
// not started, ignore a node failure
return;
@ -545,41 +624,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
// nothing to do here...
return;
}
clusterService.submitStateUpdateTask("zen-disco-node-failed(" + node + "), reason " + reason,
new ClusterStateUpdateTask(Priority.IMMEDIATE) {
@Override
public ClusterState execute(ClusterState currentState) {
if (currentState.nodes().nodeExists(node) == false) {
logger.debug("node [{}] already removed from cluster state. ignoring.", node);
return currentState;
}
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(currentState.nodes()).remove(node);
currentState = ClusterState.builder(currentState).nodes(builder).build();
// check if we have enough master nodes, if not, we need to move into joining the cluster again
if (!electMaster.hasEnoughMasterNodes(currentState.nodes())) {
return rejoin(currentState, "not enough master nodes");
}
// eagerly run reroute to remove dead nodes from routing table
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(currentState).build(),
"[" + node + "] failed");
return ClusterState.builder(currentState).routingResult(routingResult).build();
}
@Override
public void onNoLongerMaster(String source) {
// already logged
}
@Override
public void onFailure(String source, Exception e) {
logger.error("unexpected failure during [{}]", e, source);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
}
});
removeNode(node, "zen-disco-node-failed", reason);
}
private void handleMinimumMasterNodesChanged(final int minimumMasterNodes) {

View File

@ -633,6 +633,15 @@ public final class NodeEnvironment extends AbstractComponent implements Closeabl
};
}
/**
* A functional interface that people can use to reference {@link #shardLock(ShardId, long)}
*/
@FunctionalInterface
public interface ShardLocker {
ShardLock lock(ShardId shardId, long lockTimeoutMS) throws IOException;
}
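
The interface lets a method reference stand in for NodeEnvironment itself, which is how Store.tryOpenIndex(..., nodeEnv::shardLock, ...) uses it later in this commit. A minimal sketch, assuming nodeEnv and shardId are in scope:

NodeEnvironment.ShardLocker locker = nodeEnv::shardLock; // method reference satisfies the SAM type
try (ShardLock lock = locker.lock(shardId, 5000)) {      // ShardLock is Closeable
    // inspect shard files while the lock is held; close() releases it
}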
/**
* Returns all currently lock shards.
*

View File

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
@ -106,7 +107,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA);
continue;
}
@ -147,7 +148,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
} else {
// we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY);
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound);
}
continue;
@ -167,7 +168,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED);
}
}
return changed;
@ -384,7 +385,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
final List<NodeGatewayStartedShards> throttleNodeShards;
final List<NodeGatewayStartedShards> noNodeShards;
public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards, List<NodeGatewayStartedShards> throttleNodeShards,
public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards,
List<NodeGatewayStartedShards> throttleNodeShards,
List<NodeGatewayStartedShards> noNodeShards) {
this.yesNodeShards = yesNodeShards;
this.throttleNodeShards = throttleNodeShards;

View File

@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
@ -31,6 +30,7 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
@ -118,7 +118,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
currentNode, nodeWithHighestMatch);
UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.REALLOCATED_REPLICA,
"existing allocation of replica to [" + currentNode + "] cancelled, sync id match found on node ["+ nodeWithHighestMatch + "]",
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false);
null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), false, UnassignedInfo.AllocationStatus.NO_ATTEMPT);
// don't cancel shard in the loop as it will cause a ConcurrentModificationException
recoveriesToCancel.add(new Tuple<>(shard, unassignedInfo));
changed = true;
@ -150,9 +150,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
if (canBeAllocatedToAtLeastOneNode(shard, allocation) == false) {
Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation);
if (decision.type() != Decision.Type.YES) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision));
continue;
}
@ -160,7 +161,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA);
continue; // still fetching
}
@ -181,11 +182,11 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we checked before on NO
Decision decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we are throttling this, but we have enough to allocate to this node, ignore it for now
unassignedIterator.removeAndIgnore();
changed |= unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision));
} else {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
@ -194,7 +195,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
ignoreUnassignedIfDelayed(unassignedIterator, shard);
changed |= ignoreUnassignedIfDelayed(unassignedIterator, shard);
}
}
return changed;
@ -210,22 +211,25 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*
* @param unassignedIterator iterator over unassigned shards
* @param shard the shard which might be delayed
* @return true iff there was a change to the unassigned info
*/
public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard) {
public boolean ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard) {
if (shard.unassignedInfo().isDelayed()) {
logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
/**
* mark it as changed, since we want to kick a publishing to schedule future allocation,
* see {@link org.elasticsearch.cluster.routing.RoutingService#clusterChanged(ClusterChangedEvent)}).
*/
unassignedIterator.removeAndIgnore();
return unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION);
}
return false;
}
/**
* Can the shard be allocated on at least one node based on the allocation deciders.
* Determines if the shard can be allocated on at least one node based on the allocation deciders.
*
* Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one
* node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided
 * YES or THROTTLE).
*/
private boolean canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
Decision madeDecision = Decision.NO;
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
if (node == null) {
@ -235,10 +239,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (decision.type() == Decision.Type.YES) {
return true;
return decision;
} else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
madeDecision = decision;
}
}
return false;
return madeDecision;
}
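
Concretely, the loop orders decisions YES > THROTTLE > NO: a single YES returns immediately, a THROTTLE upgrades an initial NO, and whatever survives is what UnassignedInfo.AllocationStatus.fromDecision(...) above translates into the ignore status. For example, deciders returning [NO, THROTTLE, NO] across the data nodes yield the THROTTLE decision, so the shard is ignored as DECIDERS_THROTTLED; [NO, NO] yields Decision.NO and the shard is ignored as having no allocatable node.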
/**

View File

@ -43,6 +43,7 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardPath;
import org.elasticsearch.index.shard.ShardStateMetaData;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -65,18 +66,19 @@ public class TransportNodesListGatewayStartedShards extends
public static final String ACTION_NAME = "internal:gateway/local/started_shards";
private final NodeEnvironment nodeEnv;
private final IndicesService indicesService;
@Inject
public TransportNodesListGatewayStartedShards(Settings settings, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
NodeEnvironment env) {
NodeEnvironment env, IndicesService indicesService) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, Request::new, NodeRequest::new, ThreadPool.Names.FETCH_SHARD_STARTED,
NodeGatewayStartedShards.class);
this.nodeEnv = env;
this.indicesService = indicesService;
}
@Override
@ -127,21 +129,24 @@ public class TransportNodesListGatewayStartedShards extends
throw e;
}
ShardPath shardPath = null;
try {
IndexSettings indexSettings = new IndexSettings(metaData, settings);
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) {
throw new IllegalStateException(shardId + " no shard path found");
if (indicesService.getShardOrNull(shardId) == null) {
// we don't have an open shard on the store, validate the files on disk are openable
ShardPath shardPath = null;
try {
IndexSettings indexSettings = new IndexSettings(metaData, settings);
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, indexSettings);
if (shardPath == null) {
throw new IllegalStateException(shardId + " no shard path found");
}
Store.tryOpenIndex(shardPath.resolveIndex(), shardId, nodeEnv::shardLock, logger);
} catch (Exception exception) {
logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId,
shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
String allocationId = shardStateMetaData.allocationId != null ?
shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion,
allocationId, shardStateMetaData.primary, exception);
}
Store.tryOpenIndex(shardPath.resolveIndex(), shardId, logger);
} catch (Exception exception) {
logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId,
shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : "");
String allocationId = shardStateMetaData.allocationId != null ?
shardStateMetaData.allocationId.getId() : null;
return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.legacyVersion,
allocationId, shardStateMetaData.primary, exception);
}
logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData);
@ -336,5 +341,19 @@ public class TransportNodesListGatewayStartedShards extends
result = 31 * result + (storeException != null ? storeException.hashCode() : 0);
return result;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
buf.append("NodeGatewayStartedShards[")
.append("allocationId=").append(allocationId)
.append(",primary=").append(primary)
.append(",legacyVersion=").append(legacyVersion);
if (storeException != null) {
buf.append(",storeException=").append(storeException);
}
buf.append("]");
return buf.toString();
}
}
}

View File

@ -538,7 +538,8 @@ public class MapperService extends AbstractIndexComponent {
return new DocumentMapperForType(mapper, null);
}
if (!dynamic) {
throw new TypeMissingException(index(), type, "trying to auto create mapping, but dynamic mapping is disabled");
throw new TypeMissingException(index(),
new IllegalStateException("trying to auto create mapping, but dynamic mapping is disabled"), type);
}
mapper = parse(type, null, true);
return new DocumentMapperForType(mapper, mapper.mapping());

View File

@ -69,7 +69,7 @@ import java.util.Objects;
public class NumberFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
// this is private since it has a different default
private static final Setting<Boolean> COERCE_SETTING =
static final Setting<Boolean> COERCE_SETTING =
Setting.boolSetting("index.mapping.coerce", true, Property.IndexScope);
public static class Builder extends FieldMapper.Builder<Builder, NumberFieldMapper> {

View File

@ -0,0 +1,616 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.mapper.core;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.apache.lucene.search.BoostQuery;
import org.apache.lucene.search.Query;
import org.elasticsearch.action.fieldstats.FieldStats;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentParser.Token;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.fielddata.AtomicNumericFieldData;
import org.elasticsearch.index.fielddata.FieldData;
import org.elasticsearch.index.fielddata.IndexFieldData;
import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexNumericFieldData;
import org.elasticsearch.index.fielddata.NumericDoubleValues;
import org.elasticsearch.index.fielddata.ScriptDocValues;
import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData;
import org.elasticsearch.index.mapper.FieldMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.Mapper;
import org.elasticsearch.index.mapper.MapperParsingException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.core.LegacyNumberFieldMapper.Defaults;
import org.elasticsearch.index.mapper.internal.AllFieldMapper;
import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.search.DocValueFormat;
import org.elasticsearch.search.MultiValueMode;
import org.joda.time.DateTimeZone;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/** A {@link FieldMapper} for scaled floats. Values are internally multiplied
* by a scaling factor and rounded to the closest long. */
public class ScaledFloatFieldMapper extends FieldMapper implements AllFieldMapper.IncludeInAll {
public static final String CONTENT_TYPE = "scaled_float";
// use the same default as numbers
private static final Setting<Boolean> COERCE_SETTING = NumberFieldMapper.COERCE_SETTING;
public static class Builder extends FieldMapper.Builder<Builder, ScaledFloatFieldMapper> {
private boolean scalingFactorSet = false;
private Boolean ignoreMalformed;
private Boolean coerce;
public Builder(String name) {
super(name, new ScaledFloatFieldType(), new ScaledFloatFieldType());
builder = this;
}
public Builder ignoreMalformed(boolean ignoreMalformed) {
this.ignoreMalformed = ignoreMalformed;
return builder;
}
protected Explicit<Boolean> ignoreMalformed(BuilderContext context) {
if (ignoreMalformed != null) {
return new Explicit<>(ignoreMalformed, true);
}
if (context.indexSettings() != null) {
return new Explicit<>(IGNORE_MALFORMED_SETTING.get(context.indexSettings()), false);
}
return Defaults.IGNORE_MALFORMED;
}
public Builder coerce(boolean coerce) {
this.coerce = coerce;
return builder;
}
public Builder scalingFactor(double scalingFactor) {
((ScaledFloatFieldType) fieldType).setScalingFactor(scalingFactor);
scalingFactorSet = true;
return this;
}
protected Explicit<Boolean> coerce(BuilderContext context) {
if (coerce != null) {
return new Explicit<>(coerce, true);
}
if (context.indexSettings() != null) {
return new Explicit<>(COERCE_SETTING.get(context.indexSettings()), false);
}
return Defaults.COERCE;
}
@Override
public ScaledFloatFieldMapper build(BuilderContext context) {
if (scalingFactorSet == false) {
throw new IllegalArgumentException("Field [" + name + "] misses required parameter [scaling_factor]");
}
setupFieldType(context);
ScaledFloatFieldMapper fieldMapper =
new ScaledFloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context),
coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
return (ScaledFloatFieldMapper) fieldMapper.includeInAll(includeInAll);
}
}
public static class TypeParser implements Mapper.TypeParser {
@Override
public Mapper.Builder<?,?> parse(String name, Map<String, Object> node,
ParserContext parserContext) throws MapperParsingException {
Builder builder = new Builder(name);
TypeParsers.parseField(builder, name, node, parserContext);
for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
Map.Entry<String, Object> entry = iterator.next();
String propName = entry.getKey();
Object propNode = entry.getValue();
if (propName.equals("null_value")) {
if (propNode == null) {
throw new MapperParsingException("Property [null_value] cannot be null.");
}
builder.nullValue(NumberFieldMapper.NumberType.DOUBLE.parse(propNode));
iterator.remove();
} else if (propName.equals("ignore_malformed")) {
builder.ignoreMalformed(TypeParsers.nodeBooleanValue("ignore_malformed", propNode, parserContext));
iterator.remove();
} else if (propName.equals("coerce")) {
builder.coerce(TypeParsers.nodeBooleanValue("coerce", propNode, parserContext));
iterator.remove();
} else if (propName.equals("scaling_factor")) {
builder.scalingFactor(NumberFieldMapper.NumberType.DOUBLE.parse(propNode).doubleValue());
iterator.remove();
}
}
return builder;
}
}
public static final class ScaledFloatFieldType extends MappedFieldType {
private double scalingFactor;
public ScaledFloatFieldType() {
super();
setTokenized(false);
setHasDocValues(true);
setOmitNorms(true);
}
ScaledFloatFieldType(ScaledFloatFieldType other) {
super(other);
this.scalingFactor = other.scalingFactor;
}
public double getScalingFactor() {
return scalingFactor;
}
public void setScalingFactor(double scalingFactor) {
checkIfFrozen();
this.scalingFactor = scalingFactor;
}
@Override
public MappedFieldType clone() {
return new ScaledFloatFieldType(this);
}
@Override
public String typeName() {
return CONTENT_TYPE;
}
@Override
public void checkCompatibility(MappedFieldType other, List<String> conflicts, boolean strict) {
super.checkCompatibility(other, conflicts, strict);
if (scalingFactor != ((ScaledFloatFieldType) other).getScalingFactor()) {
conflicts.add("mapper [" + name() + "] has different [scaling_factor] values");
}
}
@Override
public Query termQuery(Object value, QueryShardContext context) {
failIfNotIndexed();
double queryValue = NumberFieldMapper.NumberType.DOUBLE.parse(value).doubleValue();
long scaledValue = Math.round(queryValue * scalingFactor);
Query query = NumberFieldMapper.NumberType.LONG.termQuery(name(), scaledValue);
if (boost() != 1f) {
query = new BoostQuery(query, boost());
}
return query;
}
@Override
public Query termsQuery(List values, QueryShardContext context) {
failIfNotIndexed();
List<Long> scaledValues = new ArrayList<>(values.size());
for (Object value : values) {
double queryValue = NumberFieldMapper.NumberType.DOUBLE.parse(value).doubleValue();
long scaledValue = Math.round(queryValue * scalingFactor);
scaledValues.add(scaledValue);
}
Query query = NumberFieldMapper.NumberType.LONG.termsQuery(name(), Collections.unmodifiableList(scaledValues));
if (boost() != 1f) {
query = new BoostQuery(query, boost());
}
return query;
}
@Override
public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper) {
failIfNotIndexed();
Long lo = null;
if (lowerTerm != null) {
double dValue = NumberFieldMapper.NumberType.DOUBLE.parse(lowerTerm).doubleValue();
if (includeLower == false) {
dValue = Math.nextUp(dValue);
}
lo = Math.round(Math.ceil(dValue * scalingFactor));
}
Long hi = null;
if (upperTerm != null) {
double dValue = NumberFieldMapper.NumberType.DOUBLE.parse(upperTerm).doubleValue();
if (includeUpper == false) {
dValue = Math.nextDown(dValue);
}
hi = Math.round(Math.floor(dValue * scalingFactor));
}
Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true);
if (boost() != 1f) {
query = new BoostQuery(query, boost());
}
return query;
}
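The bound arithmetic above is subtle, so a self-contained worked example may help; the scaling factor and bounds are illustrative:
// Demonstrates how scaled_float translates double-valued bounds into the
// long-encoded space: exclusive bounds are nudged with nextUp/nextDown,
// then ceil/floor pick the nearest encodable value inside the range.
public class ScaledFloatRangeDemo {
    public static void main(String[] args) {
        double scalingFactor = 100.0;
        // term: 1.23 encodes to round(1.23 * 100) = 123
        System.out.println(Math.round(1.23 * scalingFactor));
        // range (10.005, 20.004]:
        double lower = Math.nextUp(10.005);                       // exclusive lower bound
        long lo = Math.round(Math.ceil(lower * scalingFactor));   // 1001
        long hi = Math.round(Math.floor(20.004 * scalingFactor)); // 2000
        System.out.println("[" + lo + ", " + hi + "]");           // both ends now inclusive
    }
}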
@Override
public FieldStats<?> stats(IndexReader reader) throws IOException {
FieldStats.Long stats = (FieldStats.Long) NumberFieldMapper.NumberType.LONG.stats(
reader, name(), isSearchable(), isAggregatable());
if (stats == null) {
return null;
}
return new FieldStats.Double(stats.getMaxDoc(), stats.getDocCount(),
stats.getSumDocFreq(), stats.getSumTotalTermFreq(),
stats.isSearchable(), stats.isAggregatable(),
stats.getMinValue() == null ? null : stats.getMinValue() / scalingFactor,
stats.getMaxValue() == null ? null : stats.getMaxValue() / scalingFactor);
}
@Override
public IndexFieldData.Builder fielddataBuilder() {
failIfNoDocValues();
return new IndexFieldData.Builder() {
@Override
public IndexFieldData<?> build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache,
CircuitBreakerService breakerService, MapperService mapperService) {
final IndexNumericFieldData scaledValues = (IndexNumericFieldData) new DocValuesIndexFieldData.Builder()
.numericType(IndexNumericFieldData.NumericType.LONG)
.build(indexSettings, fieldType, cache, breakerService, mapperService);
return new ScaledFloatIndexFieldData(scaledValues, scalingFactor);
}
};
}
@Override
public Object valueForSearch(Object value) {
if (value == null) {
return null;
}
return ((Number) value).longValue() / scalingFactor;
}
@Override
public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) {
if (timeZone != null) {
throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName()
+ "] does not support custom time zones");
}
if (format == null) {
return DocValueFormat.RAW;
} else {
return new DocValueFormat.Decimal(format);
}
}
@Override
public boolean equals(Object o) {
if (super.equals(o) == false) {
return false;
}
return scalingFactor == ((ScaledFloatFieldType) o).scalingFactor;
}
@Override
public int hashCode() {
return 31 * super.hashCode() + Double.hashCode(scalingFactor);
}
}
private Boolean includeInAll;
private Explicit<Boolean> ignoreMalformed;
private Explicit<Boolean> coerce;
private ScaledFloatFieldMapper(
String simpleName,
MappedFieldType fieldType,
MappedFieldType defaultFieldType,
Explicit<Boolean> ignoreMalformed,
Explicit<Boolean> coerce,
Settings indexSettings,
MultiFields multiFields,
CopyTo copyTo) {
super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo);
final double scalingFactor = fieldType().getScalingFactor();
if (Double.isFinite(scalingFactor) == false || scalingFactor <= 0) {
throw new IllegalArgumentException("[scaling_factor] must be a positive number, got [" + scalingFactor + "]");
}
this.ignoreMalformed = ignoreMalformed;
this.coerce = coerce;
}
@Override
public ScaledFloatFieldType fieldType() {
return (ScaledFloatFieldType) super.fieldType();
}
@Override
protected String contentType() {
return fieldType.typeName();
}
@Override
protected ScaledFloatFieldMapper clone() {
return (ScaledFloatFieldMapper) super.clone();
}
@Override
public Mapper includeInAll(Boolean includeInAll) {
if (includeInAll != null) {
ScaledFloatFieldMapper clone = clone();
clone.includeInAll = includeInAll;
return clone;
} else {
return this;
}
}
@Override
public Mapper includeInAllIfNotSet(Boolean includeInAll) {
if (includeInAll != null && this.includeInAll == null) {
ScaledFloatFieldMapper clone = clone();
clone.includeInAll = includeInAll;
return clone;
} else {
return this;
}
}
@Override
public Mapper unsetIncludeInAll() {
if (includeInAll != null) {
ScaledFloatFieldMapper clone = clone();
clone.includeInAll = null;
return clone;
} else {
return this;
}
}
@Override
protected void parseCreateField(ParseContext context, List<Field> fields) throws IOException {
XContentParser parser = context.parser();
Object value;
Number numericValue = null;
if (context.externalValueSet()) {
value = context.externalValue();
} else if (parser.currentToken() == Token.VALUE_NULL) {
value = null;
} else if (coerce.value()
&& parser.currentToken() == Token.VALUE_STRING
&& parser.textLength() == 0) {
value = null;
} else {
value = parser.textOrNull();
if (value != null) {
try {
numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(parser, coerce.value());
} catch (IllegalArgumentException e) {
if (ignoreMalformed.value()) {
return;
} else {
throw e;
}
}
}
}
if (value == null) {
value = fieldType().nullValue();
}
if (value == null) {
return;
}
if (numericValue == null) {
numericValue = NumberFieldMapper.NumberType.DOUBLE.parse(value);
}
if (context.includeInAll(includeInAll, this)) {
context.allEntries().addText(fieldType().name(), value.toString(), fieldType().boost());
}
double doubleValue = numericValue.doubleValue();
if (Double.isFinite(doubleValue) == false) {
// since we encode to a long, we have no way to carry NaNs and infinities
throw new IllegalArgumentException("[scaled_float] only supports finite values, but got [" + doubleValue + "]");
}
long scaledValue = Math.round(doubleValue * fieldType().getScalingFactor());
boolean indexed = fieldType().indexOptions() != IndexOptions.NONE;
boolean docValued = fieldType().hasDocValues();
boolean stored = fieldType().stored();
fields.addAll(NumberFieldMapper.NumberType.LONG.createFields(fieldType().name(), scaledValue, indexed, docValued, stored));
}
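The indexing path therefore stores a plain long; a minimal round-trip sketch, with an illustrative factor, shows what the round() costs in precision:
// Encode at index time, decode at search time. Precision finer than
// 1/scalingFactor is lost in the round().
public class ScaledFloatRoundTripDemo {
    public static void main(String[] args) {
        double scalingFactor = 100.0;
        double doubleValue = 3.14159;
        long scaledValue = Math.round(doubleValue * scalingFactor); // 314
        double decoded = scaledValue / scalingFactor;               // 3.14
        System.out.println(scaledValue + " -> " + decoded);
    }
}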
@Override
protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
super.doMerge(mergeWith, updateAllTypes);
ScaledFloatFieldMapper other = (ScaledFloatFieldMapper) mergeWith;
this.includeInAll = other.includeInAll;
if (other.ignoreMalformed.explicit()) {
this.ignoreMalformed = other.ignoreMalformed;
}
if (other.coerce.explicit()) {
this.coerce = other.coerce;
}
}
@Override
protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException {
super.doXContentBody(builder, includeDefaults, params);
builder.field("scaling_factor", fieldType().getScalingFactor());
if (includeDefaults || ignoreMalformed.explicit()) {
builder.field("ignore_malformed", ignoreMalformed.value());
}
if (includeDefaults || coerce.explicit()) {
builder.field("coerce", coerce.value());
}
if (includeDefaults || fieldType().nullValue() != null) {
builder.field("null_value", fieldType().nullValue());
}
if (includeInAll != null) {
builder.field("include_in_all", includeInAll);
} else if (includeDefaults) {
builder.field("include_in_all", false);
}
}
private static class ScaledFloatIndexFieldData implements IndexNumericFieldData {
private final IndexNumericFieldData scaledFieldData;
private final double scalingFactor;
ScaledFloatIndexFieldData(IndexNumericFieldData scaledFieldData, double scalingFactor) {
this.scaledFieldData = scaledFieldData;
this.scalingFactor = scalingFactor;
}
@Override
public String getFieldName() {
return scaledFieldData.getFieldName();
}
@Override
public AtomicNumericFieldData load(LeafReaderContext context) {
return new ScaledFloatLeafFieldData(scaledFieldData.load(context), scalingFactor);
}
@Override
public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Exception {
return new ScaledFloatLeafFieldData(scaledFieldData.loadDirect(context), scalingFactor);
}
@Override
public org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource comparatorSource(Object missingValue,
MultiValueMode sortMode, Nested nested) {
return new DoubleValuesComparatorSource(this, missingValue, sortMode, nested);
}
@Override
public void clear() {
scaledFieldData.clear();
}
@Override
public Index index() {
return scaledFieldData.index();
}
@Override
public NumericType getNumericType() {
return scaledFieldData.getNumericType();
}
}
private static class ScaledFloatLeafFieldData implements AtomicNumericFieldData {
private final AtomicNumericFieldData scaledFieldData;
private final double scalingFactorInverse;
ScaledFloatLeafFieldData(AtomicNumericFieldData scaledFieldData, double scalingFactor) {
this.scaledFieldData = scaledFieldData;
this.scalingFactorInverse = 1d / scalingFactor;
}
@Override
public ScriptDocValues.Doubles getScriptValues() {
return new ScriptDocValues.Doubles(getDoubleValues());
}
@Override
public SortedBinaryDocValues getBytesValues() {
return FieldData.toString(getDoubleValues());
}
@Override
public long ramBytesUsed() {
return scaledFieldData.ramBytesUsed();
}
@Override
public void close() {
scaledFieldData.close();
}
@Override
public SortedNumericDocValues getLongValues() {
return FieldData.castToLong(getDoubleValues());
}
@Override
public SortedNumericDoubleValues getDoubleValues() {
final SortedNumericDocValues values = scaledFieldData.getLongValues();
final NumericDocValues singleValues = DocValues.unwrapSingleton(values);
if (singleValues != null) {
return FieldData.singleton(new NumericDoubleValues() {
@Override
public double get(int docID) {
return singleValues.get(docID) * scalingFactorInverse;
}
}, DocValues.unwrapSingletonBits(values));
} else {
return new SortedNumericDoubleValues() {
@Override
public double valueAt(int index) {
return values.valueAt(index) * scalingFactorInverse;
}
@Override
public void setDocument(int doc) {
values.setDocument(doc);
}
@Override
public int count() {
return values.count();
}
};
}
}
}
}
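One detail in the leaf fielddata above: it precomputes 1/scalingFactor once and multiplies per value rather than dividing in the per-document loop. A small sketch of the decode, with illustrative values:
// Multiplying by the precomputed inverse avoids a division per document;
// the product can differ from exact division in the last bit, which the
// decode path above evidently tolerates.
public class ScalingInverseDemo {
    public static void main(String[] args) {
        double scalingFactor = 100.0;
        double inverse = 1d / scalingFactor;
        long stored = 314;                    // as encoded at index time
        System.out.println(stored * inverse); // 3.14
    }
}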

View File

@ -58,9 +58,6 @@ import java.util.Objects;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeMapValue;
/**
*
*/
public class ParentFieldMapper extends MetadataFieldMapper {
public static final String NAME = "_parent";
@ -98,7 +95,7 @@ public class ParentFieldMapper extends MetadataFieldMapper {
}
public Builder eagerGlobalOrdinals(boolean eagerGlobalOrdinals) {
((ParentFieldType) fieldType()).setEagerGlobalOrdinals(eagerGlobalOrdinals);
fieldType().setEagerGlobalOrdinals(eagerGlobalOrdinals);
return builder;
}
@ -143,8 +140,8 @@ public class ParentFieldMapper extends MetadataFieldMapper {
@Override
public MetadataFieldMapper getDefault(Settings indexSettings, MappedFieldType fieldType, String typeName) {
KeywordFieldMapper parentJoinField = createParentJoinFieldMapper(typeName, new BuilderContext(indexSettings, new ContentPath(0)));
MappedFieldType childJoinFieldType = Defaults.FIELD_TYPE.clone();
childJoinFieldType.setName(joinField(null));
MappedFieldType childJoinFieldType = new ParentFieldType(Defaults.FIELD_TYPE, typeName);
childJoinFieldType.setName(ParentFieldMapper.NAME);
return new ParentFieldMapper(parentJoinField, childJoinFieldType, null, indexSettings);
}
}

View File

@ -19,6 +19,7 @@
package org.elasticsearch.index.query;
import org.apache.lucene.geo.Rectangle;
import org.apache.lucene.search.MatchNoDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
@ -28,6 +29,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Numbers;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.geo.GeoHashUtils;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.io.stream.StreamInput;
@ -155,7 +157,13 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
if (top < bottom) {
throw new IllegalArgumentException("top is below bottom corner: " +
top + " vs. " + bottom);
}
} else if (top == bottom) {
throw new IllegalArgumentException("top cannot be the same as bottom: " +
top + " == " + bottom);
} else if (left == right) {
throw new IllegalArgumentException("left cannot be the same as right: " +
left + " == " + right);
}
// we do not check longitudes as the query generation code can deal with flipped left/right values
}
@ -174,6 +182,16 @@ public class GeoBoundingBoxQueryBuilder extends AbstractQueryBuilder<GeoBounding
return setCorners(topLeft.getLat(), topLeft.getLon(), bottomRight.getLat(), bottomRight.getLon());
}
/**
* Adds points from a single geohash.
* @param geohash The geohash for computing the bounding box.
*/
public GeoBoundingBoxQueryBuilder setCorners(final String geohash) {
// get the bounding box of the geohash and set topLeft and bottomRight
Rectangle ghBBox = GeoHashUtils.bbox(geohash);
return setCorners(new GeoPoint(ghBBox.maxLat, ghBBox.minLon), new GeoPoint(ghBBox.minLat, ghBBox.maxLon));
}
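A hedged usage sketch of the new overload; the field name and geohash are illustrative, and the single-argument constructor is assumed from the builder's existing API rather than shown in this hunk:
// Corners derived from the geohash cell: top-left = (maxLat, minLon),
// bottom-right = (minLat, maxLon) of the cell's bounding rectangle.
GeoBoundingBoxQueryBuilder query = new GeoBoundingBoxQueryBuilder("pin.location") // hypothetical field
        .setCorners("u17");                                                       // illustrative geohash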
/**
* Adds points.
* @param topLeft topLeft point to add as geohash.

View File

@ -472,9 +472,10 @@ public class HasChildQueryBuilder extends AbstractQueryBuilder<HasChildQueryBuil
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
QueryBuilder rewrite = query.rewrite(queryRewriteContext);
if (rewrite != query) {
return new HasChildQueryBuilder(type, rewrite, minChildren, minChildren, scoreMode, innerHitBuilder);
QueryBuilder rewrittenQuery = query.rewrite(queryRewriteContext);
if (rewrittenQuery != query) {
InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHitBuilder, rewrittenQuery);
return new HasChildQueryBuilder(type, rewrittenQuery, minChildren, maxChildren, scoreMode, rewrittenInnerHit);
}
return this;
}
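This and the two rewrites that follow rely on the convention that rewrite() returns the identical reference when it had nothing to do; a fragment sketching that contract (queryRewriteContext assumed in scope):
// Identity, not equals(): rewrite() hands back the same object when nothing
// changed, so a reference comparison detects the no-op cheaply and the
// builder avoids allocating a copy of itself.
QueryBuilder rewrittenQuery = query.rewrite(queryRewriteContext);
boolean unchanged = (rewrittenQuery == query);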

View File

@ -309,9 +309,10 @@ public class HasParentQueryBuilder extends AbstractQueryBuilder<HasParentQueryBu
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryShardContext) throws IOException {
QueryBuilder rewrite = query.rewrite(queryShardContext);
if (rewrite != query) {
return new HasParentQueryBuilder(type, rewrite, score, innerHit);
QueryBuilder rewrittenQuery = query.rewrite(queryShardContext);
if (rewrittenQuery != query) {
InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHit, rewrittenQuery);
return new HasParentQueryBuilder(type, rewrittenQuery, score, rewrittenInnerHit);
}
return this;
}

View File

@ -722,4 +722,16 @@ public final class InnerHitBuilder extends ToXContentToBytes implements Writeabl
}
}
static InnerHitBuilder rewrite(InnerHitBuilder original, QueryBuilder rewrittenQuery) {
if (original == null) {
return null;
}
InnerHitBuilder copy = new InnerHitBuilder(original);
copy.query = rewrittenQuery;
copy.parentChildType = original.parentChildType;
copy.nestedPath = original.nestedPath;
return copy;
}
}
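Worth noting about the helper just added: it is null-tolerant, so the has_child/has_parent/nested callers need no guard. A fragment sketching the assumed usage (rewrittenQuery and existing in scope):
// A builder without inner hits passes null straight through.
InnerHitBuilder none = InnerHitBuilder.rewrite(null, rewrittenQuery);     // stays null
InnerHitBuilder copy = InnerHitBuilder.rewrite(existing, rewrittenQuery); // fresh copy bound to the rewritten query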

View File

@ -263,9 +263,10 @@ public class NestedQueryBuilder extends AbstractQueryBuilder<NestedQueryBuilder>
@Override
protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) throws IOException {
QueryBuilder rewrite = query.rewrite(queryRewriteContext);
if (rewrite != query) {
return new NestedQueryBuilder(path, rewrite, scoreMode, innerHitBuilder);
QueryBuilder rewrittenQuery = query.rewrite(queryRewriteContext);
if (rewrittenQuery != query) {
InnerHitBuilder rewrittenInnerHit = InnerHitBuilder.rewrite(innerHitBuilder, rewrittenQuery);
return new NestedQueryBuilder(path, rewrittenQuery, scoreMode, rewrittenInnerHit);
}
return this;
}

View File

@ -30,7 +30,6 @@ import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
import org.elasticsearch.indices.TermsLookup;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.Template;
import java.io.IOException;
import java.util.Collection;
@ -621,27 +620,6 @@ public abstract class QueryBuilders {
return new WrapperQueryBuilder(source);
}
/**
* Facilitates creating template query requests using an inline script
*/
public static TemplateQueryBuilder templateQuery(Template template) {
return new TemplateQueryBuilder(template);
}
/**
* Facilitates creating template query requests using an inline script
*/
public static TemplateQueryBuilder templateQuery(String template, Map<String, Object> vars) {
return new TemplateQueryBuilder(new Template(template, ScriptService.ScriptType.INLINE, null, null, vars));
}
/**
* Facilitates creating template query requests
*/
public static TemplateQueryBuilder templateQuery(String template, ScriptService.ScriptType templateType, Map<String, Object> vars) {
return new TemplateQueryBuilder(new Template(template, templateType, null, null, vars));
}
/**
* A filter based on doc/mapping type.
*/

View File

@ -25,7 +25,6 @@ import org.apache.lucene.search.Query;
import org.apache.lucene.search.RandomAccessWeight;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
@ -36,15 +35,12 @@ import org.elasticsearch.script.LeafSearchScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.Script.ScriptField;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptParameterParser;
import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.lookup.SearchLookup;
import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
@ -97,11 +93,8 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
public static Optional<ScriptQueryBuilder> fromXContent(QueryParseContext parseContext) throws IOException {
XContentParser parser = parseContext.parser();
ScriptParameterParser scriptParameterParser = new ScriptParameterParser();
// also, when caching, since its isCacheable is false, will result in loading all bit set...
Script script = null;
Map<String, Object> params = null;
float boost = AbstractQueryBuilder.DEFAULT_BOOST;
String queryName = null;
@ -116,9 +109,6 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
} else if (token == XContentParser.Token.START_OBJECT) {
if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
script = Script.parse(parser, parseContext.getParseFieldMatcher());
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, PARAMS_FIELD)) {
// TODO remove in 3.0 (here to support old script APIs)
params = parser.map();
} else {
throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]");
}
@ -127,25 +117,14 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder<ScriptQueryBuilder>
queryName = parser.text();
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) {
boost = parser.floatValue();
} else if (!scriptParameterParser.token(currentFieldName, token, parser, parseContext.getParseFieldMatcher())) {
} else if (parseContext.getParseFieldMatcher().match(currentFieldName, ScriptField.SCRIPT)) {
script = Script.parse(parser, parseContext.getParseFieldMatcher());
} else {
throw new ParsingException(parser.getTokenLocation(), "[script] query does not support [" + currentFieldName + "]");
}
}
}
if (script == null) { // Didn't find anything using the new API so try using the old one instead
ScriptParameterValue scriptValue = scriptParameterParser.getDefaultScriptParameterValue();
if (scriptValue != null) {
if (params == null) {
params = new HashMap<>();
}
script = new Script(scriptValue.script(), scriptValue.scriptType(), scriptParameterParser.lang(), params);
}
} else if (params != null) {
throw new ParsingException(parser.getTokenLocation(),
"script params must be specified inside script object in a [script] filter");
}
if (script == null) {
throw new ParsingException(parser.getTokenLocation(), "script must be provided with a [script] filter");
}

View File

@ -22,12 +22,13 @@ package org.elasticsearch.index.query.functionscore;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.search.MultiValueMode;
import org.elasticsearch.search.SearchModule;
import java.io.IOException;
import java.util.function.BiFunction;
@ -64,8 +65,7 @@ import java.util.function.BiFunction;
* <p>
* To write a new decay scoring function, create a new class that extends
* {@link DecayFunctionBuilder}, setup a PARSER field with this class, and
* register them both using
* {@link org.elasticsearch.search.SearchModule#registerScoreFunction(Writeable.Reader, ScoreFunctionParser, ParseField)}.
* register them in {@link SearchModule#registerScoreFunctions} or {@link SearchPlugin#getScoreFunctions}.
* See {@link GaussDecayFunctionBuilder#PARSER} for an example.
*/
public final class DecayFunctionParser<DFB extends DecayFunctionBuilder<DFB>> implements ScoreFunctionParser<DFB> {

Some files were not shown because too many files have changed in this diff.