Merge branch 'master' into wildcard_imports

commit 853e9c0fd1
Ryan Ernst, 2015-12-18 12:24:30 -08:00
734 changed files with 17498 additions and 7646 deletions

.gitignore

@@ -1,37 +1,42 @@
+# intellij files
 .idea/
-.gradle/
 *.iml
 *.ipr
 *.iws
-work/
-/data/
-logs/
-.DS_Store
-build/
-generated-resources/
-**/.local*
-docs/html/
-docs/build.log
-/tmp/
-backwards/
-html_docs
-.vagrant/
-## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
-## All files (.project, .classpath, .settings/*) should be generated through Maven which
-## will correctly set the classpath based on the declared dependencies and write settings
-## files to ensure common coding style across Eclipse and IDEA.
+# eclipse files
 .project
 .classpath
 eclipse-build
 .settings
-## netbeans ignores
+# netbeans files
 nb-configuration.xml
 nbactions.xml
-dependency-reduced-pom.xml
+# gradle stuff
+.gradle/
+build/
+generated-resources/
-# old patterns specific to maven
+# maven stuff (to be removed when trunk becomes 4.x)
 *-execution-hints.log
 target/
+dependency-reduced-pom.xml
+# testing stuff
+**/.local*
+.vagrant/
+# osx stuff
+.DS_Store
+# needed in case docs build is run...maybe we can configure doc build to generate files under build?
+html_docs
+# random old stuff that we should look at the necessity of...
+/tmp/
+backwards/

@@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command.
 REST tests use the following command:

 ---------------------------------------------------------------------------
-gradle :distribution:tar:integTest \
+gradle :distribution:integ-test-zip:integTest \
   -Dtests.class=org.elasticsearch.test.rest.RestIT
 ---------------------------------------------------------------------------

 A specific test case can be run with

 ---------------------------------------------------------------------------
-gradle :distribution:tar:integTest \
+gradle :distribution:integ-test-zip:integTest \
   -Dtests.class=org.elasticsearch.test.rest.RestIT \
   -Dtests.method="test {p0=cat.shards/10_basic/Help}"
 ---------------------------------------------------------------------------

@@ -97,6 +97,7 @@ subprojects {
       // the "value" -quiet is added, separated by a space. This is ok since the javadoc
       // command already adds -quiet, so we are just duplicating it
       // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959
+      javadoc.options.encoding='UTF8'
       javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet')
     }
   }
@@ -108,7 +109,7 @@ subprojects {
   ext.projectSubstitutions = [
     "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
     "org.elasticsearch:elasticsearch:${version}": ':core',
-    "org.elasticsearch:test-framework:${version}": ':test-framework',
+    "org.elasticsearch.test:framework:${version}": ':test:framework',
     "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip',
     "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip',
    "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar',
@@ -130,8 +131,8 @@ subprojects {
 // the dependency is added.
 gradle.projectsEvaluated {
   allprojects {
-    if (project.path == ':test-framework') {
-      // :test-framework:test cannot run before and after :core:test
+    if (project.path == ':test:framework') {
+      // :test:framework:test cannot run before and after :core:test
       return
     }
     configurations.all {
@@ -168,6 +169,30 @@ gradle.projectsEvaluated {
 // intellij configuration
 allprojects {
   apply plugin: 'idea'
+  idea {
+    module {
+      // same as for the IntelliJ Gradle tooling integration
+      inheritOutputDirs = false
+      outputDir = file('build/classes/main')
+      testOutputDir = file('build/classes/test')
+      iml {
+        // fix so that Gradle idea plugin properly generates support for resource folders
+        // see also https://issues.gradle.org/browse/GRADLE-2975
+        withXml {
+          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each {
+            it.attributes().remove('isTestSource')
+            it.attributes().put('type', 'java-resource')
+          }
+          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each {
+            it.attributes().remove('isTestSource')
+            it.attributes().put('type', 'java-test-resource')
+          }
+        }
+      }
+    }
+  }
 }

 idea {

@@ -63,6 +63,7 @@ dependencies {
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
   compile 'de.thetaphi:forbiddenapis:2.0'
   compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
+  compile 'org.apache.rat:apache-rat:0.11'
 }

 processResources {

@@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask {
   @Input
   String argLine = null

-  Map<String, String> systemProperties = new HashMap<>()
+  Map<String, Object> systemProperties = new HashMap<>()
   PatternFilterable patternSet = new PatternSet()

   RandomizedTestingTask() {
@@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask {
     jvmArgs.add(argument)
   }

-  void systemProperty(String property, String value) {
+  void systemProperty(String property, Object value) {
     systemProperties.put(property, value)
   }
@@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask {
         exclude(name: excludePattern)
       }
     }
-    for (Map.Entry<String, String> prop : systemProperties) {
-      sysproperty key: prop.getKey(), value: prop.getValue()
+    for (Map.Entry<String, Object> prop : systemProperties) {
+      sysproperty key: prop.getKey(), value: prop.getValue().toString()
     }
     makeListeners()
   }
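
The String-to-Object widening above is what makes lazy values possible: a value's toString() now runs when the task builds its ant call (the prop.getValue().toString() above), not when the property is registered. A minimal sketch, where `integTest` and `nodes` are hypothetical stand-ins for a RandomizedTestingTask and a node list:

---------------------------------------------------------------------------
// hypothetical wiring, not part of this commit
Object lazyUri = "${-> nodes[0].transportUri()}"   // GString: the closure runs on toString()
integTest.systemProperty('tests.cluster', lazyUri) // resolved only when the ant call is built
---------------------------------------------------------------------------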

@@ -0,0 +1,105 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle
import org.apache.tools.ant.BuildException
import org.apache.tools.ant.BuildListener
import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.gradle.api.DefaultTask
import org.gradle.api.GradleException
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.Optional
import org.gradle.api.tasks.TaskAction
import java.nio.charset.Charset
/**
* A task which will run ant commands.
*
* Logging for the task is customizable for subclasses by overriding makeLogger.
*/
public class AntTask extends DefaultTask {
/**
* A buffer that will contain the output of the ant code run,
* if the output was not already written directly to stdout.
*/
public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream()

/** The ant logic to run, unless {@code runAnt} is overridden by a subclass (referenced by runAnt below; the Input/Optional imports above belong to it). */
@Input
@Optional
Closure doAnt

@TaskAction
final void executeTask() {
AntBuilder ant = new AntBuilder()
// remove existing loggers, we add our own
List<BuildLogger> toRemove = new ArrayList<>();
for (BuildListener listener : ant.project.getBuildListeners()) {
if (listener instanceof BuildLogger) {
toRemove.add(listener);
}
}
for (BuildLogger listener : toRemove) {
ant.project.removeBuildListener(listener)
}
final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : (logger.isInfoEnabled() ? Project.MSG_INFO : Project.MSG_WARN)
final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name())
BuildLogger antLogger = makeLogger(stream, outputLevel)
ant.project.addBuildListener(antLogger)
try {
runAnt(ant)
} catch (BuildException e) {
// ant failed, so see if we have buffered output to emit, then rethrow the failure
String buffer = outputBuffer.toString()
if (buffer.isEmpty() == false) {
logger.error("=== Ant output ===\n${buffer}")
}
throw e
}
}
/** Runs the doAnt closure. This can be overridden by subclasses instead of having to set a closure. */
protected void runAnt(AntBuilder ant) {
if (doAnt == null) {
throw new GradleException("Missing doAnt for ${name}")
}
doAnt(ant)
}
/** Create the logger the ant runner will use, with the given stream for error/output. */
protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
return new DefaultLogger(
errorPrintStream: stream,
outputPrintStream: stream,
messageOutputLevel: outputLevel)
}
/**
* Returns true if the ant logger should write to stdout, or false if to the buffer.
* The default implementation writes to the buffer when gradle info logging is disabled.
*/
protected boolean useStdout() {
return logger.isInfoEnabled()
}
}
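
The class above leaves two extension points: runAnt for the task body, and makeLogger/useStdout for log routing. A minimal sketch of a subclass (hypothetical, not part of this commit):

---------------------------------------------------------------------------
public class EchoTask extends AntTask {
    @Override
    protected void runAnt(AntBuilder ant) {
        // any AntBuilder call works here; overriding runAnt replaces
        // the need to configure a doAnt closure
        ant.echo(message: 'hello from ant')
    }

    @Override
    protected boolean useStdout() {
        return true // always stream ant output instead of buffering it
    }
}
---------------------------------------------------------------------------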

@@ -202,7 +202,7 @@ class BuildPlugin implements Plugin<Project> {
     // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
     Closure disableTransitiveDeps = { ModuleDependency dep ->
-      if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') {
+      if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
         dep.transitive = false

         // also create a configuration just for this dependency version, so that later
@@ -302,6 +302,7 @@ class BuildPlugin implements Plugin<Project> {
         options.compilerArgs << '-profile' << project.compactProfile
       }
       options.encoding = 'UTF-8'
+      //options.incremental = true
     }
   }
 }

@@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin {
   private static void configureDependencies(Project project) {
     project.dependencies {
       provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}"
-      testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}"
+      testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}"
       // we "upgrade" these optional deps to provided for plugins, since they will run
       // with a full elasticsearch server that includes optional deps
       provided "com.spatial4j:spatial4j:${project.versions.spatial4j}"

@@ -62,11 +62,14 @@ public class ForbiddenPatternsTask extends DefaultTask {
     patterns.put('nocommit', /nocommit/)
     patterns.put('tab', /\t/)
     patterns.put('wildcard imports', /^\s*import.*\.\*/)
+
+    inputs.property("excludes", filesFilter.excludes)
+    inputs.property("rules", patterns)
   }

   /** Adds a file glob pattern to be excluded */
   public void exclude(String... excludes) {
-    this.filesFilter.exclude(excludes)
+    filesFilter.exclude(excludes)
   }

   /** Adds a pattern to forbid. */

@@ -0,0 +1,122 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.apache.rat.anttasks.Report
import org.apache.rat.anttasks.SubstringLicenseMatcher
import org.apache.rat.license.SimpleLicenseFamily
import org.elasticsearch.gradle.AntTask
import org.gradle.api.tasks.SourceSet
import java.nio.file.Files
/**
* Checks files for license headers.
* <p>
* This is a port of the apache lucene check
*/
public class LicenseHeadersTask extends AntTask {
LicenseHeadersTask() {
description = "Checks sources for missing, incorrect, or unacceptable license headers"
}
@Override
protected void runAnt(AntBuilder ant) {
ant.project.addTaskDefinition('ratReport', Report)
ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher)
ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily)
// create a file for the log to go to under reports/
File reportDir = new File(project.buildDir, "reports/licenseHeaders")
reportDir.mkdirs()
File reportFile = new File(reportDir, "rat.log")
Files.deleteIfExists(reportFile.toPath())
// run rat, going to the file
ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) {
// checks all the java sources (allJava)
for (SourceSet set : project.sourceSets) {
for (File dir : set.allJava.srcDirs) {
// sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
if (dir.exists()) {
ant.fileset(dir: dir)
}
}
}
// BSD 4-clause stuff (is disallowed below)
// we keep this here, in case someone adds BSD code for some reason, it should never be allowed.
substringMatcher(licenseFamilyCategory: "BSD4 ",
licenseFamilyName: "Original BSD License (with advertising clause)") {
pattern(substring: "All advertising materials")
}
// Apache
substringMatcher(licenseFamilyCategory: "AL ",
licenseFamilyName: "Apache") {
// Apache license (ES)
pattern(substring: "Licensed to Elasticsearch under one or more contributor")
// Apache license (ASF)
pattern(substring: "Licensed to the Apache Software Foundation (ASF) under")
// this is the old-school one under some files
pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")")
}
// Generated resources
substringMatcher(licenseFamilyCategory: "GEN ",
licenseFamilyName: "Generated") {
// parsers generated by antlr
pattern(substring: "ANTLR GENERATED CODE")
}
// approved categories
approvedLicense(familyName: "Apache")
approvedLicense(familyName: "Generated")
}
// check the license file for any errors, this should be fast.
boolean zeroUnknownLicenses = false
boolean foundProblemsWithFiles = false
reportFile.eachLine('UTF-8') { line ->
if (line.startsWith("0 Unknown Licenses")) {
zeroUnknownLicenses = true
}
if (line.startsWith(" !")) {
foundProblemsWithFiles = true
}
}
if (zeroUnknownLicenses == false || foundProblemsWithFiles) {
// print the unapproved license section, usually it's all you need to fix problems.
int sectionNumber = 0
reportFile.eachLine('UTF-8') { line ->
if (line.startsWith("*******************************")) {
sectionNumber++
} else {
if (sectionNumber == 2) {
logger.error(line)
}
}
}
throw new IllegalStateException("License header problems were found! Full details: " + reportFile.absolutePath)
}
}
}

@@ -34,7 +34,9 @@ class PrecommitTasks {
     List<Task> precommitTasks = [
       configureForbiddenApis(project),
       project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
-      project.tasks.create('jarHell', JarHellTask.class)]
+      project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
+      project.tasks.create('jarHell', JarHellTask.class),
+      project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]

     // tasks with just tests don't need dependency licenses, so this flag makes adding
     // the task optional

@@ -0,0 +1,205 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.gradle.precommit
import org.apache.tools.ant.BuildLogger
import org.apache.tools.ant.DefaultLogger
import org.apache.tools.ant.Project
import org.elasticsearch.gradle.AntTask
import org.gradle.api.artifacts.Configuration
import org.gradle.api.file.FileCollection
import java.nio.file.FileVisitResult
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.SimpleFileVisitor
import java.nio.file.attribute.BasicFileAttributes
/**
* Basic static checking to keep tabs on third party JARs
*/
public class ThirdPartyAuditTask extends AntTask {
// true to be lenient about MISSING CLASSES
private boolean missingClasses;
// patterns for classes to exclude, because we understand their issues
private String[] excludes = new String[0];
ThirdPartyAuditTask() {
dependsOn(project.configurations.testCompile)
description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors'"
}
/**
* Set to true to be lenient with missing classes. By default this check will fail if it finds
* MISSING CLASSES. This means the set of jars is incomplete. However, in some cases
* this can be due to intentional exclusions that are well-tested and understood.
*/
public void setMissingClasses(boolean value) {
missingClasses = value;
}
/**
* Returns true if leniency about missing classes is enabled.
*/
public boolean isMissingClasses() {
return missingClasses;
}
/**
* classes that should be excluded from the scan,
* e.g. because we know what sheisty stuff those particular classes are up to.
*/
public void setExcludes(String[] classes) {
for (String s : classes) {
if (s.indexOf('*') != -1) {
throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!")
}
}
excludes = classes;
}
/**
* Returns current list of exclusions.
*/
public String[] getExcludes() {
return excludes;
}
@Override
protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
return new DefaultLogger(
errorPrintStream: stream,
outputPrintStream: stream,
// ignore passed in outputLevel for now, until we are filtering warning messages
messageOutputLevel: Project.MSG_ERR)
}
@Override
protected void runAnt(AntBuilder ant) {
ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask)
// we only want third party dependencies.
FileCollection jars = project.configurations.testCompile.fileCollection({ dependency ->
dependency.group.startsWith("org.elasticsearch") == false
})
// we don't want provided dependencies, which we have already scanned. e.g. don't
// scan ES core's dependencies for every single plugin
Configuration provided = project.configurations.findByName('provided')
if (provided != null) {
jars -= provided
}
// no dependencies matched, we are done
if (jars.isEmpty()) {
return;
}
// print which jars we are going to scan, always
// this is not the time to try to be succinct! Forbidden will print plenty on its own!
Set<String> names = new HashSet<>()
for (File jar : jars) {
names.add(jar.getName())
}
logger.error("[thirdPartyAudit] Scanning: " + names)
// warn that classes are missing
// TODO: move these to excludes list!
if (missingClasses) {
logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!")
}
// TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
// and then remove our temp dir afterwards. don't complain: try it yourself.
// we don't use gradle temp dir handling, just google it, or try it yourself.
File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit')
// clean up any previous mess (if we failed), then unzip everything to one directory
ant.delete(dir: tmpDir.getAbsolutePath())
tmpDir.mkdirs()
for (File jar : jars) {
ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath())
}
// convert exclusion class names to binary file names
String[] excludedFiles = new String[excludes.length];
for (int i = 0; i < excludes.length; i++) {
excludedFiles[i] = excludes[i].replace('.', '/') + ".class"
// check if the excluded file exists, if not, sure sign things are outdated
if (! new File(tmpDir, excludedFiles[i]).exists()) {
throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency")
}
}
// jarHellReprise
checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles)));
ant.thirdPartyAudit(internalRuntimeForbidden: true,
failOnUnsupportedJava: false,
failOnMissingClasses: !missingClasses,
classpath: project.configurations.testCompile.asPath) {
fileset(dir: tmpDir, excludes: excludedFiles.join(','))
}
// clean up our mess (if we succeed)
ant.delete(dir: tmpDir.getAbsolutePath())
}
/**
* check for sheisty classes: if they also exist in the extensions classloader, its jar hell with the jdk!
*/
private void checkSheistyClasses(Path root, Set<String> excluded) {
// system.parent = extensions loader.
// note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
// but groovy/gradle needs to work at all first!
ClassLoader ext = ClassLoader.getSystemClassLoader().getParent()
assert ext != null
Set<String> sheistySet = new TreeSet<>();
Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
String entry = root.relativize(file).toString()
if (entry.endsWith(".class")) {
if (ext.getResource(entry) != null) {
sheistySet.add(entry);
}
}
return FileVisitResult.CONTINUE;
}
});
// check if we are ok
if (sheistySet.isEmpty()) {
return;
}
// leniency against exclusions list
sheistySet.removeAll(excluded);
if (sheistySet.isEmpty()) {
logger.warn("[thirdPartyAudit] WARNING: JAR HELL WITH JDK! Expect insanely hard-to-debug problems!")
} else {
throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
}
}
}
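
The exclusion names above are plain class names, converted to file paths inside the unzipped jars before the forbidden-apis fileset is built. For instance, with the netty exclusion that core's build.gradle adds later in this commit:

---------------------------------------------------------------------------
String exclude = 'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator'
assert exclude.replace('.', '/') + '.class' ==
        'org/jboss/netty/handler/ssl/util/OpenJdkSelfSignedCertGenerator.class'
---------------------------------------------------------------------------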

@@ -33,10 +33,10 @@ class ClusterConfiguration {
   int numNodes = 1

   @Input
-  int baseHttpPort = 9400
+  int httpPort = 0

   @Input
-  int baseTransportPort = 9500
+  int transportPort = 0

   @Input
   boolean daemonize = true
@@ -55,7 +55,7 @@ class ClusterConfiguration {
   @Input
   Closure waitCondition = { NodeInfo node, AntBuilder ant ->
     File tmpFile = new File(node.cwd, 'wait.success')
-    ant.get(src: "http://localhost:${node.httpPort()}",
+    ant.get(src: "http://${node.httpUri()}",
         dest: tmpFile.toString(),
         ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
         retries: 10)

@@ -38,8 +38,10 @@ class ClusterFormationTasks {
   /**
    * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
+   *
+   * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
    */
-  static void setup(Project project, Task task, ClusterConfiguration config) {
+  static Object setup(Project project, Task task, ClusterConfiguration config) {
     if (task.getEnabled() == false) {
       // no need to add cluster formation tasks if the task won't run!
       return
@@ -55,6 +57,9 @@ class ClusterFormationTasks {
     Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
     task.dependsOn(wait)
+
+    // delay the resolution of the uri by wrapping in a closure, so it is not used until read for tests
+    return "${-> nodes[0].transportUri()}"
   }

   /** Adds a dependency on the given distribution */
@@ -201,16 +206,23 @@ class ClusterFormationTasks {
   static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
     Map esConfig = [
       'cluster.name'                    : node.clusterName,
-      'http.port'                       : node.httpPort(),
-      'transport.tcp.port'              : node.transportPort(),
       'pidfile'                         : node.pidFile,
-      'discovery.zen.ping.unicast.hosts': (0..<node.config.numNodes).collect{"127.0.0.1:${node.config.baseTransportPort + it}"}.join(','),
       'path.repo'                       : "${node.homeDir}/repo",
       'path.shared_data'                : "${node.homeDir}/../",
       // Define a node attribute so we can test that it exists
       'node.testattr'                   : 'test',
-      'repositories.url.allowed_urls'   : 'http://snapshot.test*'
+      'repositories.url.allowed_urls': 'http://snapshot.test*'
     ]
+    if (node.config.numNodes == 1) {
+      esConfig['http.port'] = node.config.httpPort
+      esConfig['transport.tcp.port'] = node.config.transportPort
+    } else {
+      // TODO: fix multi node so it doesn't use hardcoded ports
+      esConfig['http.port'] = 9400 + node.nodeNum
+      esConfig['transport.tcp.port'] = 9500 + node.nodeNum
+      esConfig['discovery.zen.ping.unicast.hosts'] = (0..<node.config.numNodes).collect{"localhost:${9500 + it}"}.join(',')
+    }
     esConfig.putAll(node.config.settings)

     Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
@@ -400,7 +412,12 @@ class ClusterFormationTasks {
         resourceexists {
           file(file: node.pidFile.toString())
         }
-        socket(server: '127.0.0.1', port: node.httpPort())
+        resourceexists {
+          file(file: node.httpPortsFile.toString())
+        }
+        resourceexists {
+          file(file: node.transportPortsFile.toString())
+        }
       }
     }
   }
@@ -444,6 +461,8 @@ class ClusterFormationTasks {
     logger.error("|-----------------------------------------")
     logger.error("| failure marker exists: ${node.failedMarker.exists()}")
     logger.error("| pid file exists: ${node.pidFile.exists()}")
+    logger.error("| http ports file exists: ${node.httpPortsFile.exists()}")
+    logger.error("| transport ports file exists: ${node.transportPortsFile.exists()}")
     // the waitfor failed, so dump any output we got (if info logging this goes directly to stdout)
     logger.error("|\n| [ant output]")
     node.buffer.toString('UTF-8').eachLine { line -> logger.error("| ${line}") }
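
The `"${-> nodes[0].transportUri()}"` return value relies on standard Groovy behavior: a `${-> ...}` placeholder is a closure that is re-evaluated every time the GString is stringified, so the URI is only read once the node has started and written its ports file. A self-contained illustration:

---------------------------------------------------------------------------
def port = 0
def uri = "localhost:${-> port}" // nothing resolved yet
port = 9300                      // e.g. set once the node has started
assert uri.toString() == 'localhost:9300'
---------------------------------------------------------------------------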

@@ -43,6 +43,12 @@ class NodeInfo {
   /** the pid file the node will use */
   File pidFile

+  /** a file written by elasticsearch containing the ports of each bound address for http */
+  File httpPortsFile
+
+  /** a file written by elasticsearch containing the ports of each bound address for transport */
+  File transportPortsFile
+
   /** elasticsearch home dir */
   File homeDir
@@ -92,6 +98,10 @@ class NodeInfo {
     homeDir = homeDir(baseDir, config.distribution)
     confDir = confDir(baseDir, config.distribution)
     configFile = new File(confDir, 'elasticsearch.yml')
+    // even for rpm/deb, the logs are under home because we don't start with real services
+    File logsDir = new File(homeDir, 'logs')
+    httpPortsFile = new File(logsDir, 'http.ports')
+    transportPortsFile = new File(logsDir, 'transport.ports')
     cwd = new File(baseDir, "cwd")
     failedMarker = new File(cwd, 'run.failed')
     startLog = new File(cwd, 'run.log')
@@ -119,6 +129,7 @@ class NodeInfo {
       'JAVA_HOME' : project.javaHome,
       'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
     ]
+    args.add("-Des.tests.portsfile=true")
     args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
     for (Map.Entry<String, String> property : System.properties.entrySet()) {
       if (property.getKey().startsWith('es.')) {
@@ -159,14 +170,14 @@ class NodeInfo {
     wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
   }

-  /** Returns the http port for this node */
-  int httpPort() {
-    return config.baseHttpPort + nodeNum
+  /** Returns an address and port suitable for a uri to connect to this node over http */
+  String httpUri() {
+    return httpPortsFile.readLines("UTF-8").get(0)
   }

-  /** Returns the transport port for this node */
-  int transportPort() {
-    return config.baseTransportPort + nodeNum
+  /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
+  String transportUri() {
+    return transportPortsFile.readLines("UTF-8").get(0)
   }

   /** Returns the directory elasticsearch home is contained in for the given distribution */
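
httpUri() and transportUri() lean on the portsfile format: because the node is launched with -Des.tests.portsfile=true, elasticsearch writes one bound `address:port` per line under logs/, and the first line is taken. A sketch under that assumption (file contents illustrative only):

---------------------------------------------------------------------------
File portsFile = File.createTempFile('http', '.ports')
portsFile.text = '127.0.0.1:9200\n' // assumed format of the file elasticsearch writes
assert portsFile.readLines('UTF-8').get(0) == '127.0.0.1:9200'
---------------------------------------------------------------------------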

@@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
     RestSpecHack.configureDependencies(project)
     project.afterEvaluate {
       dependsOn(RestSpecHack.configureTask(project, includePackaged))
-      systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
     }

     // this must run after all projects have been configured, so we know any project
     // references can be accessed as a fully configured
     project.gradle.projectsEvaluated {
-      ClusterFormationTasks.setup(project, this, clusterConfig)
+      Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
+      systemProperty('tests.cluster', clusterUri)
     }
   }

@@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil

 public class RunTask extends DefaultTask {

-  ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
+  ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)

   public RunTask() {
     description = "Runs elasticsearch with '${project.path}'"

@@ -42,7 +42,7 @@ public class StandaloneTestBasePlugin implements Plugin<Project> {
     // only setup tests to build
     project.sourceSets.create('test')
-    project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
+    project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

     project.eclipse.classpath.sourceSets = [project.sourceSets.test]
     project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]

@@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a r...
 java.util.Collections#EMPTY_LIST
 java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
+
+java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
+java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness
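
Both new signatures push callers toward seeded randomness so test failures replay: the banned no-arg constructor and one-arg shuffle would otherwise inject irreproducible state. A sketch of the intended pattern (the seed here is illustrative; real tests derive it from the randomized-testing framework):

---------------------------------------------------------------------------
long seed = 42L                   // illustrative; in ES code Randomness.create() supplies this
Random random = new Random(seed)
List<Integer> list = [1, 2, 3, 4, 5]
Collections.shuffle(list, random) // allowed overload: explicit Random
---------------------------------------------------------------------------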

@@ -82,7 +82,7 @@ dependencies {
   compile "net.java.dev.jna:jna:${versions.jna}", optional

   if (isEclipse == false || project.path == ":core-tests") {
-    testCompile("org.elasticsearch:test-framework:${version}") {
+    testCompile("org.elasticsearch.test:framework:${version}") {
       // tests use the locally compiled version of core
       exclude group: 'org.elasticsearch', module: 'elasticsearch'
     }
@@ -111,6 +111,14 @@ forbiddenPatterns {
   exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
 }

+// classes are missing, e.g. org.jboss.marshalling.Marshaller
+thirdPartyAudit.missingClasses = true
+// uses internal sun ssl classes!
+thirdPartyAudit.excludes = [
+  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
+  'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
+]
+
 // dependency licenses are currently checked in distribution
 dependencyLicenses.enabled = false

@@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser {
     static {
         Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
         fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
-        fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
         FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
     }

@@ -268,11 +268,15 @@ public class Version {
     public static final int V_2_0_1_ID = 2000199;
     public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_2_ID = 2000299;
-    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final int V_2_0_3_ID = 2000399;
+    public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;
     public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_1_ID = 2010199;
-    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_2_ID = 2010299;
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
     public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_3_0_0_ID = 3000099;
@@ -293,10 +297,14 @@ public class Version {
             return V_3_0_0;
         case V_2_2_0_ID:
             return V_2_2_0;
+        case V_2_1_2_ID:
+            return V_2_1_2;
         case V_2_1_1_ID:
             return V_2_1_1;
         case V_2_1_0_ID:
             return V_2_1_0;
+        case V_2_0_3_ID:
+            return V_2_0_3;
         case V_2_0_2_ID:
             return V_2_0_2;
         case V_2_0_1_ID:
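
The new ID constants follow the fixed encoding the existing constants imply, which is why 2.0.3 gets 2000399 and 2.1.2 gets 2010299: major*1000000 + minor*10000 + revision*100 + build, with build 99 marking a release (inferred from the constants themselves, not stated in this diff):

---------------------------------------------------------------------------
int id = 2010299                          // V_2_1_2_ID
assert id.intdiv(1000000) == 2            // major
assert (id % 1000000).intdiv(10000) == 1  // minor
assert (id % 10000).intdiv(100) == 2      // revision
assert id % 100 == 99                     // build; 99 = release
---------------------------------------------------------------------------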

@@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
 import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
 import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction;
@@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule {
         registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class);
         registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class);
         registerAction(FlushAction.INSTANCE, TransportFlushAction.class);
+        registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class);
         registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class);
         registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class);
         registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class);

@@ -0,0 +1,127 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.settings;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import static org.elasticsearch.cluster.ClusterState.builder;
/**
* Updates transient and persistent cluster state settings if there are any changes
* due to the update.
*/
final class SettingsUpdater {
final Settings.Builder transientUpdates = Settings.settingsBuilder();
final Settings.Builder persistentUpdates = Settings.settingsBuilder();
private final ClusterSettings clusterSettings;
SettingsUpdater(ClusterSettings clusterSettings) {
this.clusterSettings = clusterSettings;
}
synchronized Settings getTransientUpdates() {
return transientUpdates.build();
}
synchronized Settings getPersistentUpdate() {
return persistentUpdates.build();
}
synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) {
boolean changed = false;
Settings.Builder transientSettings = Settings.settingsBuilder();
transientSettings.put(currentState.metaData().transientSettings());
changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
Settings.Builder persistentSettings = Settings.settingsBuilder();
persistentSettings.put(currentState.metaData().persistentSettings());
changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
if (!changed) {
return currentState;
}
MetaData.Builder metaData = MetaData.builder(currentState.metaData())
.persistentSettings(persistentSettings.build())
.transientSettings(transientSettings.build());
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
if (updatedReadOnly) {
blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
} else {
blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
}
ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
Settings settings = build.metaData().settings();
// now we try to apply things and if they are invalid we fail
// this dryRun will validate & parse settings but won't actually apply them.
clusterSettings.dryRun(settings);
return build;
}
private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
boolean changed = false;
final Set<String> toRemove = new HashSet<>();
Settings.Builder settingsBuilder = Settings.settingsBuilder();
for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
if (entry.getValue() == null) {
toRemove.add(entry.getKey());
} else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
settingsBuilder.put(entry.getKey(), entry.getValue());
updates.put(entry.getKey(), entry.getValue());
changed = true;
} else {
throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
}
}
changed |= applyDeletes(toRemove, target);
target.put(settingsBuilder.build());
return changed;
}
private boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
boolean changed = false;
for (String entry : deletes) {
Set<String> keysToRemove = new HashSet<>();
Set<String> keySet = builder.internalMap().keySet();
for (String key : keySet) {
if (Regex.simpleMatch(entry, key)) {
keysToRemove.add(key);
}
}
for (String key : keysToRemove) {
builder.remove(key);
changed = true;
}
}
return changed;
}
}
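
A removal is requested by sending a null value for a key, and that key may be a simple wildcard; applyDeletes then drops every matching key from the builder. The matching is the existing Regex.simpleMatch, where '*' is the only wildcard:

---------------------------------------------------------------------------
import org.elasticsearch.common.regex.Regex

// the matching applyDeletes() relies on (setting names illustrative)
assert Regex.simpleMatch('discovery.zen.*', 'discovery.zen.minimum_master_nodes')
assert Regex.simpleMatch('discovery.zen.*', 'cluster.routing.allocation.enable') == false
---------------------------------------------------------------------------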

@@ -28,23 +28,19 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
-import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
-import org.elasticsearch.cluster.settings.DynamicSettings;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

-import java.util.Map;
-
 import static org.elasticsearch.cluster.ClusterState.builder;

 /**
@@ -54,15 +50,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
     private final AllocationService allocationService;

-    private final DynamicSettings dynamicSettings;
+    private final ClusterSettings clusterSettings;

     @Inject
     public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
-                                                AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings,
-                                                ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                                AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) {
         super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new);
         this.allocationService = allocationService;
-        this.dynamicSettings = dynamicSettings;
+        this.clusterSettings = clusterSettings;
     }

     @Override
@@ -73,8 +68,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
     @Override
     protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) {
         // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it
-        if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) ||
-            request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) {
+        if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) ||
+            request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) {
             return null;
         }
         return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
@@ -88,9 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
     @Override
     protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener<ClusterUpdateSettingsResponse> listener) {
-        final Settings.Builder transientUpdates = Settings.settingsBuilder();
-        final Settings.Builder persistentUpdates = Settings.settingsBuilder();
+        final SettingsUpdater updater = new SettingsUpdater(clusterSettings);
         clusterService.submitStateUpdateTask("cluster_update_settings",
                 new AckedClusterStateUpdateTask<ClusterUpdateSettingsResponse>(Priority.IMMEDIATE, request, listener) {
@@ -98,7 +91,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
             @Override
             protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
-                return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build());
+                return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
             }

             @Override
@@ -125,7 +118,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
                 // so we should *not* execute the reroute.
                 if (!clusterService.state().nodes().localNodeMaster()) {
                     logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
-                    listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+                    listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                     return;
                 }
@@ -145,13 +138,13 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
                 @Override
                 // we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged
                 protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
-                    return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build());
+                    return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
                 }

                 @Override
                 public void onNoLongerMaster(String source) {
                     logger.debug("failed to perform reroute after cluster settings were updated - current node is no longer a master");
-                    listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+                    listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                 }

                 @Override
@@ -181,58 +174,11 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct...
                 @Override
                 public ClusterState execute(final ClusterState currentState) {
-                    Settings.Builder transientSettings = Settings.settingsBuilder();
-                    transientSettings.put(currentState.metaData().transientSettings());
-                    for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
-                        if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
-                            String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
-                            if (error == null) {
-                                transientSettings.put(entry.getKey(), entry.getValue());
-                                transientUpdates.put(entry.getKey(), entry.getValue());
-                                changed = true;
-                            } else {
-                                logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
-                            }
-                        } else {
-                            logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
-                        }
-                    }
-                    Settings.Builder persistentSettings = Settings.settingsBuilder();
-                    persistentSettings.put(currentState.metaData().persistentSettings());
-                    for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
-                        if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
-                            String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
-                            if (error == null) {
-                                persistentSettings.put(entry.getKey(), entry.getValue());
-                                persistentUpdates.put(entry.getKey(), entry.getValue());
-                                changed = true;
-                            } else {
-                                logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
-                            }
-                        } else {
-                            logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
-                        }
-                    }
-                    if (!changed) {
-                        return currentState;
-                    }
-                    MetaData.Builder metaData = MetaData.builder(currentState.metaData())
-                            .persistentSettings(persistentSettings.build())
-                            .transientSettings(transientSettings.build());
-                    ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
-                    boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
-                    if (updatedReadOnly) {
-                        blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
-                    } else {
-                        blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
-                    }
-                    return builder(currentState).metaData(metaData).blocks(blocks).build();
+                    ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings());
+                    changed = clusterState != currentState;
+                    return clusterState;
                 }
             });
     }
 }

View File

@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.Collections;
public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> { public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> {
private final ScriptService scriptService; private final ScriptService scriptService;
@ -55,7 +57,7 @@ public class TransportRenderSearchTemplateAction extends HandledTransportAction<
@Override @Override
protected void doRun() throws Exception { protected void doRun() throws Exception {
ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request); ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
BytesReference processedTemplate = (BytesReference) executable.run(); BytesReference processedTemplate = (BytesReference) executable.run();
RenderSearchTemplateResponse response = new RenderSearchTemplateResponse(); RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
response.source(processedTemplate); response.source(processedTemplate);

View File

@ -31,31 +31,36 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
/** /**
* Close index action * Close index action
*/ */
public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> implements NodeSettingsService.Listener { public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> {
private final MetaDataIndexStateService indexStateService; private final MetaDataIndexStateService indexStateService;
private final DestructiveOperations destructiveOperations; private final DestructiveOperations destructiveOperations;
private volatile boolean closeIndexEnabled; private volatile boolean closeIndexEnabled;
public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = "cluster.indices.close.enable"; public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);
@Inject @Inject
public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexStateService indexStateService, ThreadPool threadPool, MetaDataIndexStateService indexStateService,
NodeSettingsService nodeSettingsService, ActionFilters actionFilters, ClusterSettings clusterSettings, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new); super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new);
this.indexStateService = indexStateService; this.indexStateService = indexStateService;
this.destructiveOperations = destructiveOperations; this.destructiveOperations = destructiveOperations;
this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true); this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings);
nodeSettingsService.addListener(this); clusterSettings.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled);
}
private void setCloseIndexEnabled(boolean closeIndexEnabled) {
this.closeIndexEnabled = closeIndexEnabled;
} }
@Override @Override
@ -73,7 +78,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) { protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
destructiveOperations.failDestructive(request.indices()); destructiveOperations.failDestructive(request.indices());
if (closeIndexEnabled == false) { if (closeIndexEnabled == false) {
throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace"); throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
} }
super.doExecute(request, listener); super.doExecute(request, listener);
} }
@ -104,13 +109,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
} }
}); });
} }
@Override
public void onRefreshSettings(Settings settings) {
final boolean enable = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled);
if (enable != this.closeIndexEnabled) {
logger.info("updating [{}] from [{}] to [{}]", SETTING_CLUSTER_INDICES_CLOSE_ENABLE, this.closeIndexEnabled, enable);
this.closeIndexEnabled = enable;
}
}
} }
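The close-index change above illustrates the new dynamic-settings pattern: a typed Setting constant replaces the string key, and a registered update consumer replaces the NodeSettingsService.Listener callback. A minimal sketch of the same wiring in isolation, using only the APIs visible in this diff (the component and setting names here are made up):

---------------------------------------------------------------------------
// Sketch only: hypothetical component mirroring the wiring above.
public class ExampleComponent {

    public static final Setting<Boolean> EXAMPLE_FLAG_SETTING =
            Setting.boolSetting("cluster.example.flag", true, true, Setting.Scope.CLUSTER);

    private volatile boolean flag;

    @Inject
    public ExampleComponent(Settings settings, ClusterSettings clusterSettings) {
        this.flag = EXAMPLE_FLAG_SETTING.get(settings); // initial value from node settings
        clusterSettings.addSettingsUpdateConsumer(EXAMPLE_FLAG_SETTING, this::setFlag); // dynamic updates
    }

    private void setFlag(boolean flag) {
        this.flag = flag;
    }
}
---------------------------------------------------------------------------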

View File

@ -31,7 +31,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
@ -45,8 +44,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Inject @Inject
public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, ActionFilters actionFilters,
NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest::new); super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, DeleteIndexRequest::new);
this.deleteIndexService = deleteIndexService; this.deleteIndexService = deleteIndexService;

View File

@ -17,26 +17,28 @@
* under the License. * under the License.
*/ */
package org.apache.lucene.queryparser.classic; package org.elasticsearch.action.admin.indices.flush;
import org.apache.lucene.search.ConstantScoreQuery; import org.elasticsearch.action.Action;
import org.apache.lucene.search.Query; import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.query.MissingQueryBuilder;
import org.elasticsearch.index.query.QueryShardContext;
/**
*
*/
public class MissingFieldQueryExtension implements FieldQueryExtension {
public static final String NAME = "_missing_"; public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
public static final String NAME = "indices:admin/synced_flush";
private SyncedFlushAction() {
super(NAME);
}
@Override @Override
public Query query(QueryShardContext context, String queryText) { public SyncedFlushResponse newResponse() {
Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE); return new SyncedFlushResponse();
if (query != null) {
return new ConstantScoreQuery(query);
} }
return null;
@Override
public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
return new SyncedFlushRequestBuilder(client, this);
} }
} }

View File

@ -0,0 +1,64 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import java.util.Arrays;
/**
* A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush
* and writes the same sync id to primary and all copies.
*
* <p>Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}. </p>
*
* @see org.elasticsearch.client.Requests#flushRequest(String...)
* @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
* @see SyncedFlushResponse
*/
public class SyncedFlushRequest extends BroadcastRequest<SyncedFlushRequest> {
public SyncedFlushRequest() {
}
/**
* Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument.
* The new request will inherit the headers and context from the original request that caused it.
*/
public SyncedFlushRequest(ActionRequest originalRequest) {
super(originalRequest);
}
/**
* Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will
* be sync flushed.
*/
public SyncedFlushRequest(String... indices) {
super(indices);
}
@Override
public String toString() {
return "SyncedFlushRequest{" +
"indices=" + Arrays.toString(indices) + "}";
}
}
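For reference, a minimal usage sketch built from the entry points the javadoc above cites; the index name and the blocking actionGet() call are illustrative:

---------------------------------------------------------------------------
// Illustrative only: sync-flush one index and inspect the shard counts.
SyncedFlushRequest request = Requests.syncedFlushRequest("my-index");
SyncedFlushResponse response = client.admin().indices().syncedFlush(request).actionGet();
int ok = response.successfulShards();   // shards that now share a sync id
int failed = response.failedShards();   // shards the synced flush failed on
---------------------------------------------------------------------------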

View File

@ -17,35 +17,25 @@
* under the License. * under the License.
*/ */
package org.elasticsearch.rest; package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.rest.action.RestActionModule; import org.elasticsearch.client.ElasticsearchClient;
import java.util.ArrayList; public class SyncedFlushRequestBuilder extends ActionRequestBuilder<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
import java.util.List;
/** public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) {
* super(client, action, new SyncedFlushRequest());
*/
public class RestModule extends AbstractModule {
private final Settings settings;
private List<Class<? extends BaseRestHandler>> restPluginsActions = new ArrayList<>();
public void addRestAction(Class<? extends BaseRestHandler> restAction) {
restPluginsActions.add(restAction);
} }
public RestModule(Settings settings) { public SyncedFlushRequestBuilder setIndices(String[] indices) {
this.settings = settings; super.request().indices(indices);
return this;
} }
public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
@Override super.request().indicesOptions(indicesOptions);
protected void configure() { return this;
bind(RestController.class).asEagerSingleton();
new RestActionModule(restPluginsActions).configure(binder());
} }
} }

View File

@ -16,16 +16,25 @@
* specific language governing permissions and limitations * specific language governing permissions and limitations
* under the License. * under the License.
*/ */
package org.elasticsearch.indices.flush; package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.indices.flush.ShardsSyncedFlushResult;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.Map; import java.util.Map;
@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap;
/** /**
* The result of performing a sync flush operation on all shards of multiple indices * The result of performing a sync flush operation on all shards of multiple indices
*/ */
public class IndicesSyncedFlushResult implements ToXContent { public class SyncedFlushResponse extends ActionResponse implements ToXContent {
final Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex; Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex;
final ShardCounts shardCounts; ShardCounts shardCounts;
SyncedFlushResponse() {
public IndicesSyncedFlushResult(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) { }
public SyncedFlushResponse(Map<String, List<ShardsSyncedFlushResult>> shardsResultPerIndex) {
// shardsResultPerIndex is never modified after it is passed to this // shardsResultPerIndex is never modified after it is passed to this
// constructor so this is safe even though shardsResultPerIndex is a // constructor so this is safe even though shardsResultPerIndex is a
// ConcurrentHashMap // ConcurrentHashMap
@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values()));
} }
/** total number of shards, including replicas, both assigned and unassigned */ /**
* total number of shards, including replicas, both assigned and unassigned
*/
public int totalShards() { public int totalShards() {
return shardCounts.total; return shardCounts.total;
} }
/** total number of shards for which the operation failed */ /**
* total number of shards for which the operation failed
*/
public int failedShards() { public int failedShards() {
return shardCounts.failed; return shardCounts.failed;
} }
/** total number of shards which were successfully sync-flushed */ /**
* total number of shards which were successfully sync-flushed
*/
public int successfulShards() { public int successfulShards() {
return shardCounts.successful; return shardCounts.successful;
} }
@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.endObject(); builder.endObject();
continue; continue;
} }
Map<ShardRouting, SyncedFlushService.SyncedFlushResponse> failedShards = shardResults.failedShards(); Map<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> failedShards = shardResults.failedShards();
for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> shardEntry : failedShards.entrySet()) { for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> shardEntry : failedShards.entrySet()) {
builder.startObject(); builder.startObject();
builder.field(Fields.SHARD, shardResults.shardId().id()); builder.field(Fields.SHARD, shardResults.shardId().id());
builder.field(Fields.REASON, shardEntry.getValue().failureReason()); builder.field(Fields.REASON, shardEntry.getValue().failureReason());
@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent {
return new ShardCounts(total, successful, failed); return new ShardCounts(total, successful, failed);
} }
static final class ShardCounts implements ToXContent { static final class ShardCounts implements ToXContent, Streamable {
public final int total; public int total;
public final int successful; public int successful;
public final int failed; public int failed;
ShardCounts(int total, int successful, int failed) { ShardCounts(int total, int successful, int failed) {
this.total = total; this.total = total;
@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent {
this.failed = failed; this.failed = failed;
} }
ShardCounts() {
}
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total); builder.field(Fields.TOTAL, total);
@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent {
builder.field(Fields.FAILED, failed); builder.field(Fields.FAILED, failed);
return builder; return builder;
} }
@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readInt();
successful = in.readInt();
failed = in.readInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeInt(total);
out.writeInt(successful);
out.writeInt(failed);
}
} }
static final class Fields { static final class Fields {
@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent {
static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
static final XContentBuilderString REASON = new XContentBuilderString("reason"); static final XContentBuilderString REASON = new XContentBuilderString("reason");
} }
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardCounts = new ShardCounts();
shardCounts.readFrom(in);
Map<String, List<ShardsSyncedFlushResult>> tmpShardsResultPerIndex = new HashMap<>();
int numShardsResults = in.readInt();
for (int i = 0; i < numShardsResults; i++) {
String index = in.readString();
List<ShardsSyncedFlushResult> shardsSyncedFlushResults = new ArrayList<>();
int numShards = in.readInt();
for (int j = 0; j < numShards; j++) {
shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in));
}
tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults);
}
shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
shardCounts.writeTo(out);
out.writeInt(shardsResultPerIndex.size());
for (Map.Entry<String, List<ShardsSyncedFlushResult>> entry : shardsResultPerIndex.entrySet()) {
out.writeString(entry.getKey());
out.writeInt(entry.getValue().size());
for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) {
shardsSyncedFlushResult.writeTo(out);
}
}
}
} }
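For readers following the serialization above, the wire layout implied by readFrom/writeTo (a sketch inferred from the code, not a documented format) is:

---------------------------------------------------------------------------
inherited ActionResponse fields
shard counts:      int total, int successful, int failed
per-index results: int numIndices, then for each index:
                   String index, int numShards, numShards x ShardsSyncedFlushResult
---------------------------------------------------------------------------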

View File

@ -0,0 +1,52 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.indices.flush;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.flush.SyncedFlushService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
/**
* Synced flush Action.
*/
public class TransportSyncedFlushAction extends HandledTransportAction<SyncedFlushRequest, SyncedFlushResponse> {
SyncedFlushService syncedFlushService;
@Inject
public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool,
TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
SyncedFlushService syncedFlushService) {
super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new);
this.syncedFlushService = syncedFlushService;
}
@Override
protected void doExecute(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener);
}
}

View File

@ -32,12 +32,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.Arrays;
/** /**
* Open index action * Open index action
*/ */
@ -49,7 +46,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Inject @Inject
public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, MetaDataIndexStateService indexStateService, ThreadPool threadPool, MetaDataIndexStateService indexStateService,
NodeSettingsService nodeSettingsService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
DestructiveOperations destructiveOperations) { DestructiveOperations destructiveOperations) {
super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new); super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, OpenIndexRequest::new);
this.indexStateService = indexStateService; this.indexStateService = indexStateService;

View File

@ -56,13 +56,14 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> { public static class StoreStatus implements Streamable, ToXContent, Comparable<StoreStatus> {
private DiscoveryNode node; private DiscoveryNode node;
private long version; private long version;
private String allocationId;
private Throwable storeException; private Throwable storeException;
private Allocation allocation; private AllocationStatus allocationStatus;
/** /**
* The status of the shard store with respect to the cluster * The status of the shard store with respect to the cluster
*/ */
public enum Allocation { public enum AllocationStatus {
/** /**
* Allocated as primary * Allocated as primary
@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private final byte id; private final byte id;
Allocation(byte id) { AllocationStatus(byte id) {
this.id = id; this.id = id;
} }
private static Allocation fromId(byte id) { private static AllocationStatus fromId(byte id) {
switch (id) { switch (id) {
case 0: return PRIMARY; case 0: return PRIMARY;
case 1: return REPLICA; case 1: return REPLICA;
case 2: return UNUSED; case 2: return UNUSED;
default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
} }
} }
@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
case 0: return "primary"; case 0: return "primary";
case 1: return "replica"; case 1: return "replica";
case 2: return "unused"; case 2: return "unused";
default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]");
} }
} }
private static Allocation readFrom(StreamInput in) throws IOException { private static AllocationStatus readFrom(StreamInput in) throws IOException {
return fromId(in.readByte()); return fromId(in.readByte());
} }
@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
private StoreStatus() { private StoreStatus() {
} }
public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) { public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) {
this.node = node; this.node = node;
this.version = version; this.version = version;
this.allocation = allocation; this.allocationId = allocationId;
this.allocationStatus = allocationStatus;
this.storeException = storeException; this.storeException = storeException;
} }
@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
} }
/** /**
* Version of the store, used to select the store that will be * Version of the store
* used as a primary.
*/ */
public long getVersion() { public long getVersion() {
return version; return version;
} }
/**
* Allocation id of the store, used to select the store that will be
* used as a primary.
*/
public String getAllocationId() {
return allocationId;
}
/** /**
* Exception while trying to open the * Exception while trying to open the
* shard index or from when the shard failed * shard index or from when the shard failed
@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
} }
/** /**
* The allocation status of the store. * The allocation status of the store.
* {@link Allocation#PRIMARY} indicates a primary shard copy * {@link AllocationStatus#PRIMARY} indicates a primary shard copy
* {@link Allocation#REPLICA} indicates a replica shard copy * {@link AllocationStatus#REPLICA} indicates a replica shard copy
* {@link Allocation#UNUSED} indicates an unused shard copy * {@link AllocationStatus#UNUSED} indicates an unused shard copy
*/ */
public Allocation getAllocation() { public AllocationStatus getAllocationStatus() {
return allocation; return allocationStatus;
} }
static StoreStatus readStoreStatus(StreamInput in) throws IOException { static StoreStatus readStoreStatus(StreamInput in) throws IOException {
@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void readFrom(StreamInput in) throws IOException { public void readFrom(StreamInput in) throws IOException {
node = DiscoveryNode.readNode(in); node = DiscoveryNode.readNode(in);
version = in.readLong(); version = in.readLong();
allocation = Allocation.readFrom(in); allocationId = in.readOptionalString();
allocationStatus = AllocationStatus.readFrom(in);
if (in.readBoolean()) { if (in.readBoolean()) {
storeException = in.readThrowable(); storeException = in.readThrowable();
} }
@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
node.writeTo(out); node.writeTo(out);
out.writeLong(version); out.writeLong(version);
allocation.writeTo(out); out.writeOptionalString(allocationId);
allocationStatus.writeTo(out);
if (storeException != null) { if (storeException != null) {
out.writeBoolean(true); out.writeBoolean(true);
out.writeThrowable(storeException); out.writeThrowable(storeException);
@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
node.toXContent(builder, params); node.toXContent(builder, params);
builder.field(Fields.VERSION, version); builder.field(Fields.VERSION, version);
builder.field(Fields.ALLOCATED, allocation.value()); builder.field(Fields.ALLOCATION_ID, allocationId);
builder.field(Fields.ALLOCATED, allocationStatus.value());
if (storeException != null) { if (storeException != null) {
builder.startObject(Fields.STORE_EXCEPTION); builder.startObject(Fields.STORE_EXCEPTION);
ElasticsearchException.toXContent(builder, params, storeException); ElasticsearchException.toXContent(builder, params, storeException);
@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
} else { } else {
int compare = Long.compare(other.version, version); int compare = Long.compare(other.version, version);
if (compare == 0) { if (compare == 0) {
return Integer.compare(allocation.id, other.allocation.id); return Integer.compare(allocationStatus.id, other.allocationStatus.id);
} }
return compare; return compare;
} }
@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon
static final XContentBuilderString STORES = new XContentBuilderString("stores"); static final XContentBuilderString STORES = new XContentBuilderString("stores");
// StoreStatus fields // StoreStatus fields
static final XContentBuilderString VERSION = new XContentBuilderString("version"); static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id");
static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception"); static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception");
static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation"); static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation");
} }
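With the new field, the per-store XContent rendered above gains an allocation_id entry. Roughly (field order follows toXContent; values are placeholders, and store_exception appears only on failure):

---------------------------------------------------------------------------
{ ...node fields..., "version": 3, "allocation_id": "xyz", "allocation": "primary",
  "store_exception": { ... } }
---------------------------------------------------------------------------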

View File

@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
} }
for (NodeGatewayStartedShards response : fetchResponse.responses) { for (NodeGatewayStartedShards response : fetchResponse.responses) {
if (shardExistsInNode(response)) { if (shardExistsInNode(response)) {
IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode());
storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException())); storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException()));
} }
} }
CollectionUtil.timSort(storeStatuses); CollectionUtil.timSort(storeStatuses);
@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder)));
} }
private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) {
for (ShardRouting shardRouting : routingNodes.node(node.id())) { for (ShardRouting shardRouting : routingNodes.node(node.id())) {
ShardId shardId = shardRouting.shardId(); ShardId shardId = shardRouting.shardId();
if (shardId.id() == shardID && shardId.getIndex().equals(index)) { if (shardId.id() == shardID && shardId.getIndex().equals(index)) {
if (shardRouting.primary()) { if (shardRouting.primary()) {
return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY; return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY;
} else if (shardRouting.assignedToNode()) { } else if (shardRouting.assignedToNode()) {
return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA; return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA;
} else { } else {
return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
} }
} }
} }
return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED;
} }
/** /**
* A shard exists/existed in a node only if shard state file exists in the node * A shard exists/existed in a node only if shard state file exists in the node
*/ */
private boolean shardExistsInNode(final NodeGatewayStartedShards response) { private boolean shardExistsInNode(final NodeGatewayStartedShards response) {
return response.storeException() != null || response.version() != -1; return response.storeException() != null || response.version() != -1 || response.allocationId() != null;
} }
@Override @Override

View File

@ -0,0 +1,203 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.common.unit.TimeValue;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
* Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal
* thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally.
*
* Notes for implementing custom subclasses:
*
* The underlying mathematical principle of <code>BackoffPolicy</code> is a progression, which can be either finite or infinite, although
* the latter should not be used for retrying. A progression can be mapped to a <code>java.util.Iterator</code> with the following
* semantics:
*
* <ul>
* <li><code>#hasNext()</code> determines whether the progression has more elements. Return <code>true</code> for infinite progressions</li>
* <li><code>#next()</code> determines the next element in the progression, i.e. the next wait time period</li>
* </ul>
*
* Note that backoff policies are exposed as <code>Iterables</code> in order to be consumed multiple times.
*/
public abstract class BackoffPolicy implements Iterable<TimeValue> {
private static final BackoffPolicy NO_BACKOFF = new NoBackoff();
/**
* Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt.
*
* @return A backoff policy without any backoff period. The returned instance is thread safe.
*/
public static BackoffPolicy noBackoff() {
return NO_BACKOFF;
}
/**
* Creates a new constant backoff policy with the provided configuration.
*
* @param delay The delay defines how long to wait between retry attempts. Must not be null.
* Must be &lt;= <code>Integer.MAX_VALUE</code> ms.
* @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
* @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) {
return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries);
}
/**
* Creates a new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking
* roughly 5.1 seconds in total.
*
* @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy exponentialBackoff() {
return exponentialBackoff(TimeValue.timeValueMillis(50), 8);
}
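The 5.1-second figure can be checked against the ExponentialBackoffIterator formula further down (start = 50 ms, 8 retries):

---------------------------------------------------------------------------
delay(k) = 50 + 10 * (floor(e^(0.8 * k)) - 1) ms,  k = 0..7
         = 50, 60, 80, 150, 280, 580, 1250, 2740 ms   (sum = 5190 ms)
---------------------------------------------------------------------------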
/**
* Creates a new exponential backoff policy with the provided configuration.
*
* @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null.
* Must be &lt;= <code>Integer.MAX_VALUE</code> ms.
* @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number.
* @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each
* iterator created from it should only be used by a single thread.
*/
public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) {
return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries);
}
private static TimeValue checkDelay(TimeValue delay) {
if (delay.millis() > Integer.MAX_VALUE) {
throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms");
}
return delay;
}
private static class NoBackoff extends BackoffPolicy {
@Override
public Iterator<TimeValue> iterator() {
return new Iterator<TimeValue>() {
@Override
public boolean hasNext() {
return false;
}
@Override
public TimeValue next() {
throw new NoSuchElementException("No backoff");
}
};
}
}
private static class ExponentialBackoff extends BackoffPolicy {
private final int start;
private final int numberOfElements;
private ExponentialBackoff(int start, int numberOfElements) {
assert start >= 0;
assert numberOfElements >= 0;
this.start = start;
this.numberOfElements = numberOfElements;
}
@Override
public Iterator<TimeValue> iterator() {
return new ExponentialBackoffIterator(start, numberOfElements);
}
}
private static class ExponentialBackoffIterator implements Iterator<TimeValue> {
private final int numberOfElements;
private final int start;
private int currentlyConsumed;
private ExponentialBackoffIterator(int start, int numberOfElements) {
this.start = start;
this.numberOfElements = numberOfElements;
}
@Override
public boolean hasNext() {
return currentlyConsumed < numberOfElements;
}
@Override
public TimeValue next() {
if (!hasNext()) {
throw new NoSuchElementException("Only up to " + numberOfElements + " elements");
}
int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1);
currentlyConsumed++;
return TimeValue.timeValueMillis(result);
}
}
private static final class ConstantBackoff extends BackoffPolicy {
private final TimeValue delay;
private final int numberOfElements;
public ConstantBackoff(TimeValue delay, int numberOfElements) {
assert numberOfElements >= 0;
this.delay = delay;
this.numberOfElements = numberOfElements;
}
@Override
public Iterator<TimeValue> iterator() {
return new ConstantBackoffIterator(delay, numberOfElements);
}
}
private static final class ConstantBackoffIterator implements Iterator<TimeValue> {
private final TimeValue delay;
private final int numberOfElements;
private int curr;
public ConstantBackoffIterator(TimeValue delay, int numberOfElements) {
this.delay = delay;
this.numberOfElements = numberOfElements;
}
@Override
public boolean hasNext() {
return curr < numberOfElements;
}
@Override
public TimeValue next() {
if (!hasNext()) {
throw new NoSuchElementException();
}
curr++;
return delay;
}
}
}
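Following the subclassing notes in the class javadoc, here is a hedged sketch of a custom policy (a finite linear progression; entirely illustrative and not part of this commit):

---------------------------------------------------------------------------
// Illustrative subclass: hasNext() bounds the retries, next() yields the waits.
class LinearBackoff extends BackoffPolicy {
    private final int stepMillis;
    private final int retries;

    LinearBackoff(int stepMillis, int retries) {
        this.stepMillis = stepMillis;
        this.retries = retries;
    }

    @Override
    public Iterator<TimeValue> iterator() {
        return new Iterator<TimeValue>() {
            private int consumed;

            @Override
            public boolean hasNext() {
                return consumed < retries; // finite progression
            }

            @Override
            public TimeValue next() {
                if (!hasNext()) {
                    throw new NoSuchElementException("Only up to " + retries + " elements");
                }
                consumed++;
                return TimeValue.timeValueMillis((long) stepMillis * consumed);
            }
        };
    }
}
---------------------------------------------------------------------------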

View File

@ -19,7 +19,6 @@
package org.elasticsearch.action.bulk; package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequest;
@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable {
/** /**
* A listener for the execution. * A listener for the execution.
*/ */
public static interface Listener { public interface Listener {
/** /**
* Callback before the bulk is executed. * Callback before the bulk is executed.
@ -62,6 +61,9 @@ public class BulkProcessor implements Closeable {
/** /**
* Callback after a failed execution of bulk request. * Callback after a failed execution of bulk request.
*
* Note that if an instance of <code>InterruptedException</code> is passed, request processing has been cancelled
* externally; the thread's interruption status has been restored prior to calling this method.
*/ */
void afterBulk(long executionId, BulkRequest request, Throwable failure); void afterBulk(long executionId, BulkRequest request, Throwable failure);
} }
@ -79,6 +81,7 @@ public class BulkProcessor implements Closeable {
private int bulkActions = 1000; private int bulkActions = 1000;
private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB);
private TimeValue flushInterval = null; private TimeValue flushInterval = null;
private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff();
/** /**
* Creates a builder of bulk processor with the client to use and the listener that will be used * Creates a builder of bulk processor with the client to use and the listener that will be used
@ -136,11 +139,27 @@ public class BulkProcessor implements Closeable {
return this; return this;
} }
/**
* Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally
* in case they have failed due to resource constraints (i.e. a thread pool was full).
*
* The default is to back off exponentially.
*
* @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff()
*/
public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) {
if (backoffPolicy == null) {
throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()");
}
this.backoffPolicy = backoffPolicy;
return this;
}
/** /**
* Builds a new bulk processor. * Builds a new bulk processor.
*/ */
public BulkProcessor build() { public BulkProcessor build() {
return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval);
} }
} }
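Putting the new hook together with the existing builder, a usage sketch (the listener body, action count, and backoff values are placeholders):

---------------------------------------------------------------------------
// Illustrative wiring of setBackoffPolicy; values are placeholders.
BulkProcessor processor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) { }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) { }
})
    .setBulkActions(500)
    .setBackoffPolicy(BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(100), 3))
    .build();
---------------------------------------------------------------------------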
@ -152,38 +171,27 @@ public class BulkProcessor implements Closeable {
return new Builder(client, listener); return new Builder(client, listener);
} }
private final Client client;
private final Listener listener;
private final String name;
private final int concurrentRequests;
private final int bulkActions; private final int bulkActions;
private final long bulkSize; private final long bulkSize;
private final TimeValue flushInterval;
private final Semaphore semaphore;
private final ScheduledThreadPoolExecutor scheduler; private final ScheduledThreadPoolExecutor scheduler;
private final ScheduledFuture scheduledFuture; private final ScheduledFuture scheduledFuture;
private final AtomicLong executionIdGen = new AtomicLong(); private final AtomicLong executionIdGen = new AtomicLong();
private BulkRequest bulkRequest; private BulkRequest bulkRequest;
private final BulkRequestHandler bulkRequestHandler;
private volatile boolean closed = false; private volatile boolean closed = false;
BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) {
this.client = client;
this.listener = listener;
this.name = name;
this.concurrentRequests = concurrentRequests;
this.bulkActions = bulkActions; this.bulkActions = bulkActions;
this.bulkSize = bulkSize.bytes(); this.bulkSize = bulkSize.bytes();
this.semaphore = new Semaphore(concurrentRequests);
this.bulkRequest = new BulkRequest(); this.bulkRequest = new BulkRequest();
this.bulkRequestHandler = (concurrentRequests == 0) ? BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests);
this.flushInterval = flushInterval;
if (flushInterval != null) { if (flushInterval != null) {
this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor")); this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor"));
this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
@ -231,14 +239,7 @@ public class BulkProcessor implements Closeable {
if (bulkRequest.numberOfActions() > 0) { if (bulkRequest.numberOfActions() > 0) {
execute(); execute();
} }
if (this.concurrentRequests < 1) { return this.bulkRequestHandler.awaitClose(timeout, unit);
return true;
}
if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
semaphore.release(this.concurrentRequests);
return true;
}
return false;
} }
/** /**
@ -308,58 +309,7 @@ public class BulkProcessor implements Closeable {
final long executionId = executionIdGen.incrementAndGet(); final long executionId = executionIdGen.incrementAndGet();
this.bulkRequest = new BulkRequest(); this.bulkRequest = new BulkRequest();
this.bulkRequestHandler.execute(bulkRequest, executionId);
if (concurrentRequests == 0) {
// execute in a blocking fashion...
boolean afterCalled = false;
try {
listener.beforeBulk(executionId, bulkRequest);
BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet();
afterCalled = true;
listener.afterBulk(executionId, bulkRequest, bulkItemResponses);
} catch (Exception e) {
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
}
} else {
boolean success = false;
boolean acquired = false;
try {
listener.beforeBulk(executionId, bulkRequest);
semaphore.acquire();
acquired = true;
client.bulk(bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
try {
listener.afterBulk(executionId, bulkRequest, response);
} finally {
semaphore.release();
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.afterBulk(executionId, bulkRequest, e);
} finally {
semaphore.release();
}
}
});
success = true;
} catch (InterruptedException e) {
Thread.interrupted();
listener.afterBulk(executionId, bulkRequest, e);
} catch (Throwable t) {
listener.afterBulk(executionId, bulkRequest, t);
} finally {
if (!success && acquired) { // if we fail on client.bulk() release the semaphore
semaphore.release();
}
}
}
} }
private boolean isOverTheLimit() { private boolean isOverTheLimit() {

View File

@ -0,0 +1,166 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
/**
* Abstracts the low-level details of bulk request handling
*/
abstract class BulkRequestHandler {
protected final ESLogger logger;
protected final Client client;
protected BulkRequestHandler(Client client) {
this.client = client;
this.logger = Loggers.getLogger(getClass(), client.settings());
}
public abstract void execute(BulkRequest bulkRequest, long executionId);
public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException;
public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
return new SyncBulkRequestHandler(client, backoffPolicy, listener);
}
public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests);
}
private static class SyncBulkRequestHandler extends BulkRequestHandler {
private final BulkProcessor.Listener listener;
private final BackoffPolicy backoffPolicy;
public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) {
super(client);
this.backoffPolicy = backoffPolicy;
this.listener = listener;
}
@Override
public void execute(BulkRequest bulkRequest, long executionId) {
boolean afterCalled = false;
try {
listener.beforeBulk(executionId, bulkRequest);
BulkResponse bulkResponse = Retry
.on(EsRejectedExecutionException.class)
.policy(backoffPolicy)
.withSyncBackoff(client, bulkRequest);
afterCalled = true;
listener.afterBulk(executionId, bulkRequest, bulkResponse);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, e);
}
} catch (Throwable t) {
logger.warn("Failed to execute bulk request {}.", t, executionId);
if (!afterCalled) {
listener.afterBulk(executionId, bulkRequest, t);
}
}
}
@Override
public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
// we are "closed" immediately as there is no request in flight
return true;
}
}
private static class AsyncBulkRequestHandler extends BulkRequestHandler {
private final BackoffPolicy backoffPolicy;
private final BulkProcessor.Listener listener;
private final Semaphore semaphore;
private final int concurrentRequests;
private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) {
super(client);
this.backoffPolicy = backoffPolicy;
assert concurrentRequests > 0;
this.listener = listener;
this.concurrentRequests = concurrentRequests;
this.semaphore = new Semaphore(concurrentRequests);
}
@Override
public void execute(BulkRequest bulkRequest, long executionId) {
boolean bulkRequestSetupSuccessful = false;
boolean acquired = false;
try {
listener.beforeBulk(executionId, bulkRequest);
semaphore.acquire();
acquired = true;
Retry.on(EsRejectedExecutionException.class)
.policy(backoffPolicy)
.withAsyncBackoff(client, bulkRequest, new ActionListener<BulkResponse>() {
@Override
public void onResponse(BulkResponse response) {
try {
listener.afterBulk(executionId, bulkRequest, response);
} finally {
semaphore.release();
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.afterBulk(executionId, bulkRequest, e);
} finally {
semaphore.release();
}
}
});
bulkRequestSetupSuccessful = true;
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
logger.info("Bulk request {} has been cancelled.", e, executionId);
listener.afterBulk(executionId, bulkRequest, e);
} catch (Throwable t) {
logger.warn("Failed to execute bulk request {}.", t, executionId);
listener.afterBulk(executionId, bulkRequest, t);
} finally {
if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore
semaphore.release();
}
}
}
@Override
public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException {
if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) {
semaphore.release(this.concurrentRequests);
return true;
}
return false;
}
}
}
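
Note: a minimal sketch (not part of the commit) of the BulkProcessor.Listener contract that both handlers drive; the method set mirrors the beforeBulk/afterBulk calls above, and the comments describe the guarantees visible in this file.

BulkProcessor.Listener listener = new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        // invoked once per bulk, before the handler executes the request
    }
    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        // success path: the response for this execution id
    }
    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        // failure path: guarded by the afterCalled flag above so it fires at most once
    }
};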


@@ -0,0 +1,237 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.bulk;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.ScheduledFuture;
import java.util.function.Predicate;
/**
* Encapsulates synchronous and asynchronous retry logic.
*/
class Retry {
private final Class<? extends Throwable> retryOnThrowable;
private BackoffPolicy backoffPolicy;
public static Retry on(Class<? extends Throwable> retryOnThrowable) {
return new Retry(retryOnThrowable);
}
/**
* @param backoffPolicy The backoff policy that defines how long and how often to wait for retries.
*/
public Retry policy(BackoffPolicy backoffPolicy) {
this.backoffPolicy = backoffPolicy;
return this;
}
Retry(Class<? extends Throwable> retryOnThrowable) {
this.retryOnThrowable = retryOnThrowable;
}
/**
* Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the
* provided listener.
*
* @param client Client invoking the bulk request.
* @param bulkRequest The bulk request that should be executed.
* @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not invoked with intermediate retry results; it only receives the final, accumulated response or failure.
*/
public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener<BulkResponse> listener) {
AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener);
r.execute(bulkRequest);
}
/**
* Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception.
*
* @param client Client invoking the bulk request.
* @param bulkRequest The bulk request that should be executed.
* @return the bulk response as returned by the client.
* @throws Exception Any exception thrown by the callable.
*/
public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception {
return SyncRetryHandler
.create(retryOnThrowable, backoffPolicy, client)
.executeBlocking(bulkRequest)
.actionGet();
}
static class AbstractRetryHandler implements ActionListener<BulkResponse> {
private final ESLogger logger;
private final Client client;
private final ActionListener<BulkResponse> listener;
private final Iterator<TimeValue> backoff;
private final Class<? extends Throwable> retryOnThrowable;
// Access only when holding a client-side lock, see also #addResponses()
private final List<BulkItemResponse> responses = new ArrayList<>();
private final long startTimestampNanos;
// needed to construct the next bulk request based on the response to the previous one
// volatile as we're called from a scheduled thread
private volatile BulkRequest currentBulkRequest;
private volatile ScheduledFuture<?> scheduledRequestFuture;
public AbstractRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
this.retryOnThrowable = retryOnThrowable;
this.backoff = backoffPolicy.iterator();
this.client = client;
this.listener = listener;
this.logger = Loggers.getLogger(getClass(), client.settings());
// in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood
this.startTimestampNanos = System.nanoTime();
}
@Override
public void onResponse(BulkResponse bulkItemResponses) {
if (!bulkItemResponses.hasFailures()) {
// we're done here, include all responses
addResponses(bulkItemResponses, (r -> true));
finishHim();
} else {
if (canRetry(bulkItemResponses)) {
addResponses(bulkItemResponses, (r -> !r.isFailed()));
retry(createBulkRequestForRetry(bulkItemResponses));
} else {
addResponses(bulkItemResponses, (r -> true));
finishHim();
}
}
}
@Override
public void onFailure(Throwable e) {
try {
listener.onFailure(e);
} finally {
FutureUtils.cancel(scheduledRequestFuture);
}
}
private void retry(BulkRequest bulkRequestForRetry) {
assert backoff.hasNext();
TimeValue next = backoff.next();
logger.trace("Retry of bulk request scheduled in {} ms.", next.millis());
scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry)));
}
private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) {
BulkRequest requestToReissue = new BulkRequest();
int index = 0;
for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) {
if (bulkItemResponse.isFailed()) {
requestToReissue.add(currentBulkRequest.requests().get(index));
}
index++;
}
return requestToReissue;
}
private boolean canRetry(BulkResponse bulkItemResponses) {
if (!backoff.hasNext()) {
return false;
}
for (BulkItemResponse bulkItemResponse : bulkItemResponses) {
if (bulkItemResponse.isFailed()) {
Throwable cause = bulkItemResponse.getFailure().getCause();
Throwable rootCause = ExceptionsHelper.unwrapCause(cause);
if (!rootCause.getClass().equals(retryOnThrowable)) {
return false;
}
}
}
return true;
}
private void finishHim() {
try {
listener.onResponse(getAccumulatedResponse());
} finally {
FutureUtils.cancel(scheduledRequestFuture);
}
}
private void addResponses(BulkResponse response, Predicate<BulkItemResponse> filter) {
for (BulkItemResponse bulkItemResponse : response) {
if (filter.test(bulkItemResponse)) {
// Use client-side lock here to avoid visibility issues. This method may be called multiple times
// (based on how many retries we have to issue) and relying that the response handling code will be
// scheduled on the same thread is fragile.
synchronized (responses) {
responses.add(bulkItemResponse);
}
}
}
}
private BulkResponse getAccumulatedResponse() {
BulkItemResponse[] itemResponses;
synchronized (responses) {
itemResponses = responses.toArray(new BulkItemResponse[responses.size()]); // sized to fit; a fixed size-1 array would leave a null element when no responses accumulated
}
long stopTimestamp = System.nanoTime();
long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis();
return new BulkResponse(itemResponses, totalLatencyMs);
}
public void execute(BulkRequest bulkRequest) {
this.currentBulkRequest = bulkRequest;
client.bulk(bulkRequest, this);
}
}
static class AsyncRetryHandler extends AbstractRetryHandler {
public AsyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener<BulkResponse> listener) {
super(retryOnThrowable, backoffPolicy, client, listener);
}
}
static class SyncRetryHandler extends AbstractRetryHandler {
private final PlainActionFuture<BulkResponse> actionFuture;
public static SyncRetryHandler create(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client) {
PlainActionFuture<BulkResponse> actionFuture = PlainActionFuture.newFuture();
return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture);
}
public SyncRetryHandler(Class<? extends Throwable> retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture<BulkResponse> actionFuture) {
super(retryOnThrowable, backoffPolicy, client, actionFuture);
this.actionFuture = actionFuture;
}
public ActionFuture<BulkResponse> executeBlocking(BulkRequest bulkRequest) {
super.execute(bulkRequest);
return actionFuture;
}
}
}
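
Note: a minimal usage sketch (not part of the commit) of the fluent Retry API, mirroring how SyncBulkRequestHandler calls it above; BackoffPolicy.exponentialBackoff() is assumed to be one of the policies shipped alongside this change.

BulkResponse response = Retry
        .on(EsRejectedExecutionException.class)
        .policy(BackoffPolicy.exponentialBackoff())
        .withSyncBackoff(client, bulkRequest);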


@@ -20,7 +20,6 @@
 package org.elasticsearch.action.percolate;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.IndicesRequest;
@@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.text.StringText;
-import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.percolator.PercolatorService;
 import org.elasticsearch.threadpool.ThreadPool;


@@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
         return this;
     }
+    /**
+     * Should the query be profiled. Defaults to <code>false</code>
+     */
+    public SearchRequestBuilder setProfile(boolean profile) {
+        sourceBuilder().profile(profile);
+        return this;
+    }
+
     @Override
     public String toString() {
         if (request.source() != null) {


@@ -20,6 +20,7 @@
 package org.elasticsearch.action.search;
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
@@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;
 import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
 import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
@@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
         this.scrollId = scrollId;
     }
+    /**
+     * If profiling was enabled, this returns an object containing the profile results from
+     * each shard. If profiling was not enabled, this will return null
+     *
+     * @return The profile results or null
+     */
+    public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
+        return internalResponse.profile();
+    }
+
     static final class Fields {
         static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
         static final XContentBuilderString TOOK = new XContentBuilderString("took");
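
Note: a hedged end-to-end sketch (not part of the commit) of the new profiling hooks; "my-index" and the match-all query are illustrative.

SearchResponse resp = client.prepareSearch("my-index")
        .setQuery(QueryBuilders.matchAllQuery())
        .setProfile(true)   // new builder method above
        .get();
Map<String, List<ProfileShardResult>> profiles = resp.getProfileResults(); // null when profiling was off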


@@ -21,25 +21,30 @@ package org.elasticsearch.action.support;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
 /**
  * Helper for dealing with destructive operations and wildcard usage.
  */
-public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener {
+public final class DestructiveOperations extends AbstractComponent {
     /**
      * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
      */
-    public static final String REQUIRES_NAME = "action.destructive_requires_name";
+    public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
     private volatile boolean destructiveRequiresName;
     @Inject
-    public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) {
+    public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false);
-        nodeSettingsService.addListener(this);
+        destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName);
+    }
+
+    private void setDestructiveRequiresName(boolean destructiveRequiresName) {
+        this.destructiveRequiresName = destructiveRequiresName;
     }
     /**
@@ -65,15 +70,6 @@ public final class DestructiveOperations extends AbstractComponent implements No
         }
     }
-    @Override
-    public void onRefreshSettings(Settings settings) {
-        boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName);
-        if (destructiveRequiresName != newValue) {
-            logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue);
-            this.destructiveRequiresName = newValue;
-        }
-    }
     private static boolean hasWildcardUsage(String aliasOrIndex) {
         return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1;
     }
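
Note: a hedged sketch (not part of the commit) of the migration pattern this change applies throughout: declare a typed, dynamic Setting and register an update consumer instead of implementing NodeSettingsService.Listener. MyComponent and "my.component.flag" are illustrative names.

public static final Setting<Boolean> MY_FLAG_SETTING =
        Setting.boolSetting("my.component.flag", false, true, Setting.Scope.CLUSTER);

public MyComponent(Settings settings, ClusterSettings clusterSettings) {
    this.flag = MY_FLAG_SETTING.get(settings); // initial value from node settings
    clusterSettings.addSettingsUpdateConsumer(MY_FLAG_SETTING, v -> this.flag = v); // dynamic updates
}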


@@ -19,8 +19,16 @@
 package org.elasticsearch.action.support.broadcast.node;
-import org.elasticsearch.action.*;
-import org.elasticsearch.action.support.*;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.NodeShouldNotConnectException;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
                 e.setIndex(shardRouting.getIndex());
                 e.setShard(shardRouting.shardId());
                 shardResults[shardIndex] = e;
-                logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
+                if (TransportActions.isShardNotAvailableException(t)) {
+                    if (logger.isTraceEnabled()) {
+                        logger.trace("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+                    }
+                } else {
+                    if (logger.isDebugEnabled()) {
+                        logger.debug("[{}] failed to execute operation for shard [{}]", t, actionName, shardRouting.shortSummary());
+                    }
+                }
             }
         }
     }


@@ -300,11 +300,15 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
         @Override
         public void onFailure(Throwable t) {
             if (t instanceof RetryOnReplicaException) {
-                logger.trace("Retrying operation on replica, action [{}], request [{}]", t, actionName, request);
+                logger.trace("Retrying operation on replica, action [{}], request [{}]", t, transportReplicaAction, request);
                 observer.waitForNextChange(new ClusterStateObserver.Listener() {
                     @Override
                     public void onNewClusterState(ClusterState state) {
-                        threadPool.executor(executor).execute(AsyncReplicaAction.this);
+                        // Forking a thread on local node via transport service so that custom transport service have an
+                        // opportunity to execute custom logic before the replica operation begins
+                        String extraMessage = "action [" + transportReplicaAction + "], request[" + request + "]";
+                        TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
+                        transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
                     }
                     @Override


@@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
 import org.elasticsearch.search.lookup.SourceLookup;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
@@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent {
     private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
         try {
             if (scriptService != null) {
-                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request);
+                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap());
                 script.setNextVar("ctx", ctx);
                 script.run();
                 // we need to unwrap the ctx...
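
Note: a hedged sketch (not part of the commit) of the new trailing argument: executable() now accepts a params map handed through to script compilation; the singletonMap content here is purely illustrative.

ExecutableScript script = scriptService.executable(
        request.script, ScriptContext.Standard.UPDATE, request,
        Collections.singletonMap("param", "value"));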


@@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
@@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
@@ -390,6 +393,29 @@ public interface IndicesAdminClient extends ElasticsearchClient {
      */
     FlushRequestBuilder prepareFlush(String... indices);
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     *
+     * @param request The sync flush request
+     * @return A result future
+     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+     */
+    ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);
+
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     *
+     * @param request  The sync flush request
+     * @param listener A listener to be notified with a result
+     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+     */
+    void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);
+
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     */
+    SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);
+
     /**
      * Explicitly force merge one or more indices into a the number of segments.
     *
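
Note: a hedged sketch (not part of the commit) of the builder-style entry point added above; "logs" is an illustrative index name.

SyncedFlushResponse flushResponse = client.admin().indices()
        .prepareSyncedFlush("logs")
        .get();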


@@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -265,6 +266,17 @@ public class Requests {
         return new FlushRequest(indices);
     }
+    /**
+     * Creates a synced flush indices request.
+     *
+     * @param indices The indices to sync flush. Use <tt>null</tt> or <tt>_all</tt> to execute against all indices
+     * @return The synced flush request
+     * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
+     */
+    public static SyncedFlushRequest syncedFlushRequest(String... indices) {
+        return new SyncedFlushRequest(indices);
+    }
+
     /**
      * Creates a force merge request.
     *
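
Note: a hedged sketch (not part of the commit) of the request-object variant, paired with the async client method declared above.

SyncedFlushRequest request = Requests.syncedFlushRequest("logs");
client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
    @Override
    public void onResponse(SyncedFlushResponse response) {
        // inspect per-shard sync results
    }
    @Override
    public void onFailure(Throwable e) {
        // handle the failure
    }
});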


@@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
@@ -1315,6 +1319,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
     }
+    @Override
+    public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
+        return execute(SyncedFlushAction.INSTANCE, request);
+    }
+
+    @Override
+    public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
+        execute(SyncedFlushAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
+        return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
+    }
+
     @Override
     public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
         execute(GetMappingsAction.INSTANCE, request, listener);


@@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers;
 import org.elasticsearch.client.transport.support.TransportProxyClient;
 import org.elasticsearch.cluster.ClusterNameModule;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.inject.Module;
@@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.EnvironmentModule;
 import org.elasticsearch.indices.breaker.CircuitBreakerModule;
 import org.elasticsearch.monitor.MonitorService;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.PluginsModule;
 import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPoolModule;
-import org.elasticsearch.transport.TransportModule;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.netty.NettyTransport;
@@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 * The transport client allows to create a client that is not part of the cluster, but simply connects to one
 * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
 * <p>
-* The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is
+* The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is
 * started in client mode (only connects, no bind).
 */
 public class TransportClient extends AbstractClient {
@@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient {
         }
         modules.add(new PluginsModule(pluginsService));
         modules.add(new SettingsModule(this.settings, settingsFilter ));
-        modules.add(new NetworkModule(networkService));
+        modules.add(new NetworkModule(networkService, this.settings, true));
         modules.add(new ClusterNameModule(this.settings));
         modules.add(new ThreadPoolModule(threadPool));
-        modules.add(new TransportModule(this.settings));
         modules.add(new SearchModule() {
             @Override
             protected void configure() {
@@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient {
             }
         });
         modules.add(new ActionModule(true));
-        modules.add(new ClientTransportModule());
         modules.add(new CircuitBreakerModule(this.settings));
         pluginsService.processModules(modules);
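
Note: a hedged sketch (not part of the commit) of constructing a client that now boots the NetworkModule in client mode; builder() and InetSocketTransportAddress reflect the 2.x-era API and are stated here as assumptions.

TransportClient client = TransportClient.builder()
        .settings(settingsBuilder().put("cluster.name", "my-cluster").build())
        .build();
client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName("localhost"), 9300));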


@@ -19,9 +19,6 @@
 package org.elasticsearch.cluster;
-import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
-import org.elasticsearch.action.support.DestructiveOperations;
-import org.elasticsearch.action.support.replication.TransportReplicationAction;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction;
 import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction;
@@ -29,7 +26,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.IndexTemplateFilter;
-import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
 import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
 import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
@@ -60,17 +56,15 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio
 import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.cluster.service.InternalClusterService;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.cluster.settings.DynamicSettings;
 import org.elasticsearch.cluster.settings.Validator;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.ExtensionPoint;
-import org.elasticsearch.discovery.DiscoverySettings;
-import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.index.engine.EngineConfig;
@@ -81,21 +75,13 @@ import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.MergePolicyConfig;
 import org.elasticsearch.index.shard.MergeSchedulerConfig;
 import org.elasticsearch.index.store.IndexStore;
-import org.elasticsearch.index.store.IndexStoreConfig;
 import org.elasticsearch.index.translog.TranslogConfig;
 import org.elasticsearch.indices.IndicesWarmer;
-import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.ttl.IndicesTTLService;
-import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.internal.DefaultSearchContext;
-import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportService;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
+import java.util.*;
 /**
  * Configures classes and services that affect the entire cluster.
@@ -122,7 +108,6 @@ public class ClusterModule extends AbstractModule {
         SnapshotInProgressAllocationDecider.class));
     private final Settings settings;
-    private final DynamicSettings.Builder clusterDynamicSettings = new DynamicSettings.Builder();
     private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder();
     private final ExtensionPoint.SelectedType<ShardsAllocator> shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class);
     private final ExtensionPoint.ClassSet<AllocationDecider> allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class);
@@ -134,7 +119,6 @@ public class ClusterModule extends AbstractModule {
     public ClusterModule(Settings settings) {
         this.settings = settings;
-        registerBuiltinClusterSettings();
         registerBuiltinIndexSettings();
         for (Class<? extends AllocationDecider> decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
@@ -144,70 +128,11 @@
         registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class);
     }
-    private void registerBuiltinClusterSettings() {
-        registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, Validator.EMPTY);
-        registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*", Validator.EMPTY);
-        registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT);
-        registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT);
-        registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT);
-        registerClusterDynamicSetting(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ALLOCATION_ALLOW_REBALANCE_VALIDATOR);
-        registerClusterDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER);
-        registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY);
-        registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, Validator.EMPTY);
-        registerClusterDynamicSetting(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, Validator.BOOLEAN);
-        registerClusterDynamicSetting(DiscoverySettings.NO_MASTER_BLOCK, Validator.EMPTY);
-        registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY);
-        registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY);
-        registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
-        registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, Validator.EMPTY);
-        registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
-        registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME);
-        registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME);
-        registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR);
-        registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER);
-        registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER);
-        registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY);
-        registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, Validator.EMPTY);
-        registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, Validator.BOOLEAN);
-        registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, Validator.BOOLEAN);
-        registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, Validator.EMPTY);
-        registerClusterDynamicSetting(DestructiveOperations.REQUIRES_NAME, Validator.EMPTY);
-        registerClusterDynamicSetting(DiscoverySettings.PUBLISH_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(DiscoverySettings.PUBLISH_DIFF_ENABLE, Validator.BOOLEAN);
-        registerClusterDynamicSetting(DiscoverySettings.COMMIT_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
-        registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
-        registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
-        registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE);
-        registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE);
-        registerClusterDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE);
-        registerClusterDynamicSetting(SearchService.DEFAULT_SEARCH_TIMEOUT, Validator.TIMEOUT);
-        registerClusterDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR);
-        registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE, Validator.EMPTY);
-        registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE + ".*", Validator.EMPTY);
-        registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY);
-        registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY);
-        registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN);
-        registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER);
-        registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE);
-    }
     private void registerBuiltinIndexSettings() {
         registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE);
         registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY);
-        registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.EMPTY);
+        registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER);
         registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY);
         registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY);
         registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY);
@@ -272,9 +197,6 @@ public class ClusterModule extends AbstractModule {
         indexDynamicSettings.addSetting(setting, validator);
     }
-    public void registerClusterDynamicSetting(String setting, Validator validator) {
-        clusterDynamicSettings.addSetting(setting, validator);
-    }
     public void registerAllocationDecider(Class<? extends AllocationDecider> allocationDecider) {
         allocationDeciders.registerExtension(allocationDecider);
@@ -290,7 +212,6 @@ public class ClusterModule extends AbstractModule {
     @Override
     protected void configure() {
-        bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings.build());
         bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build());
         // bind ShardsAllocator


@@ -129,7 +129,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         @SuppressWarnings("unchecked")
         T proto = (T)customPrototypes.get(type);
         if (proto == null) {
-            throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]");
+            throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
         }
         return proto;
     }


@@ -37,6 +37,13 @@ public interface ClusterStateTaskExecutor<T> {
         return true;
     }
+    /**
+     * Callback invoked after new cluster state is published. Note that
+     * this method is not invoked if the cluster state was not updated.
+     */
+    default void clusterStatePublished(ClusterState newClusterState) {
+    }
+
     /**
      * Represents the result of a batched execution of cluster state update tasks
     * @param <T> the type of the cluster state update task
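
Note: a hedged sketch (not part of the commit) of an executor opting into the new callback; MyTask and the BatchResult builder usage are assumptions about the surrounding interface.

class MyTaskExecutor implements ClusterStateTaskExecutor<MyTask> {
    @Override
    public BatchResult<MyTask> execute(ClusterState currentState, List<MyTask> tasks) throws Exception {
        // no-op executor: mark all tasks successful, keep the state unchanged
        return BatchResult.<MyTask>builder().successes(tasks).build(currentState);
    }
    @Override
    public void clusterStatePublished(ClusterState newClusterState) {
        // runs only when the batched update actually produced and published a new state
    }
}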


@ -37,11 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.monitor.fs.FsInfo;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ReceiveTimeoutTransportException;
@@ -63,8 +64,8 @@ import java.util.concurrent.TimeUnit;
  */
 public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {

-    public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval";
-    public static final String INTERNAL_CLUSTER_INFO_TIMEOUT = "cluster.info.update.timeout";
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);

     private volatile TimeValue updateFrequency;
@@ -82,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
     private final List<Listener> listeners = new CopyOnWriteArrayList<>();

     @Inject
-    public InternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+    public InternalClusterInfoService(Settings settings, ClusterSettings clusterSettings,
                                       TransportNodesStatsAction transportNodesStatsAction,
                                       TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
                                       ThreadPool threadPool) {
@@ -95,10 +96,12 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
         this.transportIndicesStatsAction = transportIndicesStatsAction;
         this.clusterService = clusterService;
         this.threadPool = threadPool;
-        this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30));
-        this.fetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, TimeValue.timeValueSeconds(15));
-        this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
-        nodeSettingsService.addListener(new ApplySettings());
+        this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings);
+        this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings);
+        this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout);
+        clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency);
+        clusterSettings.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);

         // Add InternalClusterInfoService to listen for Master changes
         this.clusterService.add((LocalNodeMasterListener)this);
@@ -106,35 +109,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
         this.clusterService.add((ClusterStateListener)this);
     }

-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null);
-            // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled
-            Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
-            if (newUpdateFrequency != null) {
-                if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) {
-                    logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency);
-                    throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds");
-                } else {
-                    logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency);
-                    InternalClusterInfoService.this.updateFrequency = newUpdateFrequency;
-                }
-            }
-            TimeValue newFetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, null);
-            if (newFetchTimeout != null) {
-                logger.info("updating fetch timeout [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_TIMEOUT, fetchTimeout, newFetchTimeout);
-                InternalClusterInfoService.this.fetchTimeout = newFetchTimeout;
-            }
-
-            // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable
-            if (newEnabled != null) {
-                InternalClusterInfoService.this.enabled = newEnabled;
-            }
-        }
+    private void setEnabled(boolean enabled) {
+        this.enabled = enabled;
+    }
+
+    private void setFetchTimeout(TimeValue fetchTimeout) {
+        this.fetchTimeout = fetchTimeout;
+    }
+
+    void setUpdateFrequency(TimeValue updateFrequency) {
+        this.updateFrequency = updateFrequency;
     }

     @Override
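
This hunk shows the migration pattern that repeats through the rest of this merge: a String settings key becomes a typed Setting<T> constant, the constructor reads the initial value with SETTING.get(settings), and the hand-written NodeSettingsService.Listener gives way to a small setter registered through ClusterSettings.addSettingsUpdateConsumer. A minimal sketch of a component following the pattern; MyService and the "my.service.interval" key are hypothetical, everything else is the API visible above:

---------------------------------------------------------------------------
// Sketch only: MyService and its key are made up for illustration.
public class MyService extends AbstractComponent {

    // key, default, minimum, dynamic=true, cluster scope
    public static final Setting<TimeValue> MY_INTERVAL_SETTING = Setting.timeSetting(
        "my.service.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);

    private volatile TimeValue interval;

    @Inject
    public MyService(Settings settings, ClusterSettings clusterSettings) {
        super(settings);
        this.interval = MY_INTERVAL_SETTING.get(settings); // initial value or default
        // the consumer receives an already-parsed, already-validated value
        clusterSettings.addSettingsUpdateConsumer(MY_INTERVAL_SETTING, this::setInterval);
    }

    private void setInterval(TimeValue interval) {
        this.interval = interval; // a volatile write is enough; no locking needed
    }
}
---------------------------------------------------------------------------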

View File

@@ -26,11 +26,12 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.client.IndicesAdminClient;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.Mapping;
-import org.elasticsearch.node.settings.NodeSettingsService;

 import java.util.concurrent.TimeoutException;
@@ -40,30 +41,23 @@ import java.util.concurrent.TimeoutException;
  */
 public class MappingUpdatedAction extends AbstractComponent {

-    public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout";
+    public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

     private IndicesAdminClient client;
     private volatile TimeValue dynamicMappingUpdateTimeout;

-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout;
-            TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current);
-            if (!current.equals(newValue)) {
-                logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue);
-                MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue;
-            }
-        }
-    }
-
     @Inject
-    public MappingUpdatedAction(Settings settings, NodeSettingsService nodeSettingsService) {
+    public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30));
-        nodeSettingsService.addListener(new ApplySettings());
+        this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout);
+    }
+
+    private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) {
+        this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout;
     }

     public void setClient(Client client) {
         this.client = client.admin().indices();
     }
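
With validation now owned by the Setting itself (positiveTimeSetting rejects invalid values at parse time), the listener's defensive checks and logging disappear. A sketch of reading the setting, assuming Settings.settingsBuilder() as the 2.x-era builder entry point:

---------------------------------------------------------------------------
Settings update = Settings.settingsBuilder()
    .put("indices.mapping.dynamic_timeout", "45s")
    .build();
TimeValue timeout = MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(update); // 45s
// .get(Settings.EMPTY) falls back to the declared 30s default;
// a non-positive value would fail inside get() rather than in this class
---------------------------------------------------------------------------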

View File

@@ -20,7 +20,12 @@
 package org.elasticsearch.cluster.action.shard;

 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.cluster.*;
+import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTaskConfig;
+import org.elasticsearch.cluster.ClusterStateTaskExecutor;
+import org.elasticsearch.cluster.ClusterStateTaskListener;
+import org.elasticsearch.cluster.NotMasterException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingService;
@@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.EmptyTransportResponseHandler;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportRequestOptions;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Locale;

 import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;

-/**
- *
- */
 public class ShardStateAction extends AbstractComponent {

     public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started";
     public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure";
@@ -105,12 +115,95 @@ public class ShardStateAction extends AbstractComponent {
             @Override
             public void handleException(TransportException exp) {
-                logger.warn("failed to send failed shard to {}", exp, masterNode);
+                logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry);
                 listener.onShardFailedFailure(masterNode, exp);
             }
         });
     }

+    private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+        @Override
+        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+            handleShardFailureOnMaster(request, new ClusterStateTaskListener() {
+                    @Override
+                    public void onFailure(String source, Throwable t) {
+                        logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting);
+                        try {
+                            channel.sendResponse(t);
+                        } catch (Throwable channelThrowable) {
+                            logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting);
+                        }
+                    }
+
+                    @Override
+                    public void onNoLongerMaster(String source) {
+                        logger.error("no longer master while failing shard [{}]", request.shardRouting);
+                        try {
+                            channel.sendResponse(new NotMasterException(source));
+                        } catch (Throwable channelThrowable) {
+                            logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting);
+                        }
+                    }
+
+                    @Override
+                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+                        try {
+                            channel.sendResponse(TransportResponse.Empty.INSTANCE);
+                        } catch (Throwable channelThrowable) {
+                            logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting);
+                        }
+                    }
+                }
+            );
+        }
+    }
+
+    class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry> {
+        @Override
+        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
+            BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
+            List<FailedRerouteAllocation.FailedShard> failedShards = new ArrayList<>(tasks.size());
+            for (ShardRoutingEntry task : tasks) {
+                failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
+            }
+            ClusterState maybeUpdatedState = currentState;
+            try {
+                RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards);
+                if (result.changed()) {
+                    maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
+                }
+                batchResultBuilder.successes(tasks);
+            } catch (Throwable t) {
+                batchResultBuilder.failures(tasks, t);
+            }
+            return batchResultBuilder.build(maybeUpdatedState);
+        }
+
+        @Override
+        public void clusterStatePublished(ClusterState newClusterState) {
+            int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size();
+            if (numberOfUnassignedShards > 0) {
+                String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards);
+                if (logger.isTraceEnabled()) {
+                    logger.trace(reason + ", scheduling a reroute");
+                }
+                routingService.reroute(reason);
+            }
+        }
+    }
+
+    private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
+
+    private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) {
+        logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
+        clusterService.submitStateUpdateTask(
+            "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
+            shardRoutingEntry,
+            ClusterStateTaskConfig.build(Priority.HIGH),
+            shardFailedClusterStateHandler,
+            listener);
+    }
+
     public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) {
         DiscoveryNode masterNode = clusterService.state().nodes().masterNode();
         if (masterNode == null) {
@@ -129,69 +222,15 @@ public class ShardStateAction extends AbstractComponent {
             @Override
             public void handleException(TransportException exp) {
                 logger.warn("failed to send shard started to [{}]", exp, masterNode);
             }
         });
     }

-    private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler();
-
-    private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) {
-        logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry);
-        clusterService.submitStateUpdateTask(
-                "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]",
-                shardRoutingEntry,
-                ClusterStateTaskConfig.build(Priority.HIGH),
-                shardFailedClusterStateHandler,
-                shardFailedClusterStateHandler);
-    }
-
-    class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
-        @Override
-        public BatchResult<ShardRoutingEntry> execute(ClusterState currentState, List<ShardRoutingEntry> tasks) throws Exception {
-            BatchResult.Builder<ShardRoutingEntry> batchResultBuilder = BatchResult.builder();
-            List<FailedRerouteAllocation.FailedShard> shardRoutingsToBeApplied = new ArrayList<>(tasks.size());
-            for (ShardRoutingEntry task : tasks) {
-                shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure));
-            }
-            ClusterState maybeUpdatedState = currentState;
-            try {
-                RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied);
-                if (result.changed()) {
-                    maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build();
-                }
-                batchResultBuilder.successes(tasks);
-            } catch (Throwable t) {
-                batchResultBuilder.failures(tasks, t);
-            }
-            return batchResultBuilder.build(maybeUpdatedState);
-        }
-
-        @Override
-        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-            if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) {
-                logger.trace("unassigned shards after shard failures. scheduling a reroute.");
-                routingService.reroute("unassigned shards after shard failures, scheduling a reroute");
-            }
-        }
-
-        @Override
-        public void onFailure(String source, Throwable t) {
-            logger.error("unexpected failure during [{}]", t, source);
-        }
-    }
-
-    private final ShardStartedClusterStateHandler shardStartedClusterStateHandler =
-            new ShardStartedClusterStateHandler();
-
-    private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
-        logger.debug("received shard started for {}", shardRoutingEntry);
-
-        clusterService.submitStateUpdateTask(
-                "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
-                shardRoutingEntry,
-                ClusterStateTaskConfig.build(Priority.URGENT),
-                shardStartedClusterStateHandler,
-                shardStartedClusterStateHandler);
+    class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
+
+        @Override
+        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
+            handleShardStartedOnMaster(request);
+            channel.sendResponse(TransportResponse.Empty.INSTANCE);
+        }
     }

     class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor<ShardRoutingEntry>, ClusterStateTaskListener {
@@ -223,26 +262,20 @@ public class ShardStateAction extends AbstractComponent {
         }
     }

-    private class ShardFailedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
-        @Override
-        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-            handleShardFailureOnMaster(request);
-            channel.sendResponse(TransportResponse.Empty.INSTANCE);
-        }
-    }
-
-    class ShardStartedTransportHandler implements TransportRequestHandler<ShardRoutingEntry> {
-
-        @Override
-        public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception {
-            shardStartedOnMaster(request);
-            channel.sendResponse(TransportResponse.Empty.INSTANCE);
-        }
+    private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler();
+
+    private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) {
+        logger.debug("received shard started for {}", shardRoutingEntry);
+
+        clusterService.submitStateUpdateTask(
+            "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]",
+            shardRoutingEntry,
+            ClusterStateTaskConfig.build(Priority.URGENT),
+            shardStartedClusterStateHandler,
+            shardStartedClusterStateHandler);
     }

     public static class ShardRoutingEntry extends TransportRequest {

         ShardRouting shardRouting;
         String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE;
         String message;
@@ -283,8 +316,13 @@ public class ShardStateAction extends AbstractComponent {
     }

     public interface Listener {
-        default void onSuccess() {}
-        default void onShardFailedNoMaster() {}
-        default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {}
+        default void onSuccess() {
+        }
+
+        default void onShardFailedNoMaster() {
+        }
+
+        default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {
+        }
     }
 }
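
The new ShardFailedClusterStateHandler processes shard-failed notifications as a batch: a single execute call derives one new cluster state from all queued tasks and reports success or failure for the batch through a BatchResult. A stripped-down executor showing just that contract, with String standing in for a real task type:

---------------------------------------------------------------------------
// Sketch of the ClusterStateTaskExecutor contract used above; the builder
// calls (successes/failures/build) are the ones visible in the diff.
class BatchedTasksExecutor implements ClusterStateTaskExecutor<String> {
    @Override
    public BatchResult<String> execute(ClusterState currentState, List<String> tasks) throws Exception {
        BatchResult.Builder<String> builder = BatchResult.builder();
        ClusterState maybeUpdatedState = currentState;
        try {
            // derive one new cluster state from the whole batch here
            builder.successes(tasks);
        } catch (Throwable t) {
            builder.failures(tasks, t); // the batch fails or succeeds as a unit
        }
        return builder.build(maybeUpdatedState);
    }
}
---------------------------------------------------------------------------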

View File

@@ -40,8 +40,8 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.support.LoggerMessageFormat;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.common.xcontent.FromXContentBuilder;
@@ -134,13 +134,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         //noinspection unchecked
         T proto = (T) customPrototypes.get(type);
         if (proto == null) {
-            throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]");
+            throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins");
         }
         return proto;
     }

-    public static final String SETTING_READ_ONLY = "cluster.blocks.read_only";
+    public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);

     public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
@@ -745,23 +745,23 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
     /** All known byte-sized cluster settings. */
     public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
-        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
-        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
+        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));

     /** All known time cluster settings. */
     public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
-        IndicesTTLService.INDICES_TTL_INTERVAL,
-        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC,
-        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK,
-        RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT,
-        RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT,
-        RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT,
-        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL,
-        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL,
-        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT,
-        DiscoverySettings.PUBLISH_TIMEOUT,
-        InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD));
+        IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
+        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
+        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
+        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
+        DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
+        InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));

     /** As of 2.0 we require units for time and byte-sized settings. This methods adds default units to any cluster settings that don't
      * specify a unit. */
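
The typed constants still expose their raw string keys via getKey(), which is what keeps string-keyed registries like these sets working. For example:

---------------------------------------------------------------------------
String key = InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey();
// key is "cluster.info.update.timeout", so membership checks still work:
boolean isTimeSetting = MetaData.CLUSTER_TIME_SETTINGS.contains(key); // true
---------------------------------------------------------------------------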

View File

@@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Locale;

 /**
  * Service responsible for submitting open/close index requests
@@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent {
             }

             if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
-                IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
-                for (IndexShardRoutingTable shard : indexRoutingTable) {
-                    for (ShardRouting shardRouting : shard) {
-                        if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) {
-                            throw new IndexPrimaryShardNotAllocatedException(new Index(index));
-                        }
-                    }
-                }
                 indicesToClose.add(index);
             }
         }

View File

@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.percolator.PercolatorService;
@@ -237,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent {
     }

     private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
-        Map<String, DocumentMapper> newMappers = new HashMap<>();
-        Map<String, DocumentMapper> existingMappers = new HashMap<>();
+        String mappingType = request.type();
+        CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
         for (String index : request.indices()) {
             IndexService indexService = indicesService.indexServiceSafe(index);
             // try and parse it (no need to add it here) so we can bail early in case of parsing exception
@@ -246,16 +245,13 @@ public class MetaDataMappingService extends AbstractComponent {
             DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
             if (MapperService.DEFAULT_MAPPING.equals(request.type())) {
                 // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default
-                newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false);
+                newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false);
             } else {
-                newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null);
+                newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
                 if (existingMapper != null) {
                     // first, simulate
-                    MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
-                    // if we have conflicts, throw an exception
-                    if (mergeResult.hasConflicts()) {
-                        throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}");
-                    }
+                    // this will just throw exceptions in case of problems
+                    existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
                 } else {
                     // TODO: can we find a better place for this validation?
                     // The reason this validation is here is that the mapper service doesn't learn about
@@ -274,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent {
                     }
                 }
             }
-            newMappers.put(index, newMapper);
-
-            if (existingMapper != null) {
-                existingMappers.put(index, existingMapper);
-            }
-        }
-
-        String mappingType = request.type();
-        if (mappingType == null) {
-            mappingType = newMappers.values().iterator().next().type();
-        } else if (!mappingType.equals(newMappers.values().iterator().next().type())) {
-            throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
+            if (mappingType == null) {
+                mappingType = newMapper.type();
+            } else if (mappingType.equals(newMapper.type()) == false) {
+                throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition");
+            }
         }

+        assert mappingType != null;
         if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') {
             throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
         }
         final Map<String, MappingMetaData> mappings = new HashMap<>();
-        for (Map.Entry<String, DocumentMapper> entry : newMappers.entrySet()) {
-            String index = entry.getKey();
+        for (String index : request.indices()) {
             // do the actual merge here on the master, and update the mapping source
-            DocumentMapper newMapper = entry.getValue();
             IndexService indexService = indicesService.indexService(index);
             if (indexService == null) {
                 continue;
             }

             CompressedXContent existingSource = null;
-            if (existingMappers.containsKey(entry.getKey())) {
-                existingSource = existingMappers.get(entry.getKey()).mappingSource();
+            DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
+            if (existingMapper != null) {
+                existingSource = existingMapper.mappingSource();
             }
-            DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes());
+            DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes());
             CompressedXContent updatedSource = mergedMapper.mappingSource();

             if (existingSource != null) {
@@ -322,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent {
             } else {
                 mappings.put(index, new MappingMetaData(mergedMapper));
                 if (logger.isDebugEnabled()) {
-                    logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource);
+                    logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
                 } else if (logger.isInfoEnabled()) {
-                    logger.info("[{}] create_mapping [{}]", index, newMapper.type());
+                    logger.info("[{}] create_mapping [{}]", index, mappingType);
                 }
             }
         }
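
The merge flow is now simulate-then-apply against a single reused CompressedXContent, and DocumentMapper.merge signals conflicts by throwing instead of returning a MergeResult. A condensed sketch of that shape, assuming indexService, existingMapper, and request are in scope as in applyRequest; the boolean parse argument is simplified here:

---------------------------------------------------------------------------
// Condensed sketch of the flow above; not the full method.
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
DocumentMapper newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null);
if (existingMapper != null) {
    // simulate: throws on conflict instead of returning a MergeResult
    existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes());
}
// real merge on the master, applied rather than simulated
DocumentMapper merged = indexService.mapperService().merge(request.type(), mappingUpdateSource, true, request.updateAllTypes());
---------------------------------------------------------------------------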

View File

@@ -21,12 +21,12 @@ package org.elasticsearch.cluster.routing;

 import com.carrotsearch.hppc.ObjectIntHashMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.index.shard.ShardId;
@@ -34,7 +34,6 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
@@ -85,7 +84,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         Map<String, List<ShardRouting>> nodesToShards = new HashMap<>();
         // fill in the nodeToShards with the "live" nodes
         for (ObjectCursor<DiscoveryNode> cursor : clusterState.nodes().dataNodes().values()) {
-            nodesToShards.put(cursor.value.id(), new ArrayList<ShardRouting>());
+            nodesToShards.put(cursor.value.id(), new ArrayList<>());
         }

         // fill in the inverse of node -> shards allocated
@@ -98,21 +97,13 @@ public class RoutingNodes implements Iterable<RoutingNode> {
                 // by the ShardId, as this is common for primary and replicas.
                 // A replica Set might have one (and not more) replicas with the state of RELOCATING.
                 if (shard.assignedToNode()) {
-                    List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
-                    if (entries == null) {
-                        entries = new ArrayList<>();
-                        nodesToShards.put(shard.currentNodeId(), entries);
-                    }
+                    List<ShardRouting> entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>());
                     final ShardRouting sr = getRouting(shard, readOnly);
                     entries.add(sr);
                     assignedShardsAdd(sr);
                     if (shard.relocating()) {
-                        entries = nodesToShards.get(shard.relocatingNodeId());
                         relocatingShards++;
-                        if (entries == null) {
-                            entries = new ArrayList<>();
-                            nodesToShards.put(shard.relocatingNodeId(), entries);
-                        }
+                        entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>());
                         // add the counterpart shard with relocatingNodeId reflecting the source from which
                         // it's relocating from.
                         ShardRouting targetShardRouting = shard.buildTargetRelocatingShard();
@@ -456,11 +447,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
             // no unassigned
             return;
         }
-        List<ShardRouting> shards = assignedShards.get(shard.shardId());
-        if (shards == null) {
-            shards = new ArrayList<>();
-            assignedShards.put(shard.shardId(), shards);
-        }
+        List<ShardRouting> shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>());
         assert assertInstanceNotInList(shard, shards);
         shards.add(shard);
     }
@@ -671,7 +658,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
         }

         public void shuffle() {
-            Collections.shuffle(unassigned);
+            Randomness.shuffle(unassigned);
         }

         /**
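
The three get/null-check/put sequences collapse into Map.computeIfAbsent, and Collections.shuffle is swapped for Randomness.shuffle, which appears to route shuffling through the project's central seed handling. The map idiom as a self-contained example:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class ComputeIfAbsentExample {
    public static void main(String[] args) {
        Map<String, List<String>> shardsByNode = new HashMap<>();
        // creates the list on first use, then reuses it on the second call
        shardsByNode.computeIfAbsent("node-1", k -> new ArrayList<>()).add("shard-0");
        shardsByNode.computeIfAbsent("node-1", k -> new ArrayList<>()).add("shard-1");
        System.out.println(shardsByNode); // {node-1=[shard-0, shard-1]}
    }
}
---------------------------------------------------------------------------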

View File

@@ -19,6 +19,8 @@

 package org.elasticsearch.cluster.routing;

+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent {
         return shardIdentifier;
     }

-    public boolean allocatedPostIndexCreate() {
+    public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) {
         if (active()) {
             return true;
         }
@@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent {
             return false;
         }

+        if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) {
+            // when no shards with this id have ever been active for this index
+            return false;
+        }
+
         return true;
     }
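
allocatedPostIndexCreate now takes the IndexMetaData so it can consult the index's active allocation IDs and creation version; this is also why the primary-not-yet-allocated check disappeared from MetaDataIndexStateService earlier in this diff. A sketch of the new call-site shape, assuming shard and indexMetaData are in scope:

---------------------------------------------------------------------------
// Sketch only; shard and indexMetaData come from the surrounding context.
if (shard.primary() && shard.allocatedPostIndexCreate(indexMetaData) == false) {
    // primary was never allocated after index creation; on post-3.0 indices
    // this now also covers the "no active allocation ids" case
}
---------------------------------------------------------------------------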

View File

@@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
 import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Objects;
-import java.util.Set;
+import java.util.*;
 import java.util.function.Function;
 import java.util.stream.Collectors;

View File

@@ -34,12 +34,13 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.gateway.PriorityComparator;
-import org.elasticsearch.node.settings.NodeSettingsService;

 import java.util.ArrayList;
 import java.util.Collection;
@@ -72,42 +73,32 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
  */
 public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {

-    public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold";
-    public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index";
-    public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard";
-
-    private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f;
-    private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f;
-
-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance);
-            final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance);
-            float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold);
-            if (threshold <= 0.0f) {
-                throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold);
-            }
-            BalancedShardsAllocator.this.threshold = threshold;
-            BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance);
-        }
-    }
-
-    private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR);
-
-    private volatile float threshold = 1.0f;
+    public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER);
+    public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER);
+    public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER);
+
+    private volatile WeightFunction weightFunction;
+    private volatile float threshold;

     public BalancedShardsAllocator(Settings settings) {
-        this(settings, new NodeSettingsService(settings));
+        this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
     }

     @Inject
-    public BalancedShardsAllocator(Settings settings, NodeSettingsService nodeSettingsService) {
+    public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        ApplySettings applySettings = new ApplySettings();
-        applySettings.onRefreshSettings(settings);
-        nodeSettingsService.addListener(applySettings);
+        setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings));
+        setThreshold(THRESHOLD_SETTING.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction);
+        clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold);
+    }
+
+    private void setWeightFunction(float indexBalance, float shardBalanceFactor) {
+        weightFunction = new WeightFunction(indexBalance, shardBalanceFactor);
+    }
+
+    private void setThreshold(float threshold) {
+        this.threshold = threshold;
    }

     @Override
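
addSettingsUpdateConsumer also has a two-setting form: both values are re-resolved and handed to one consumer, so the index and shard balance factors always change together. Sketch, using the settings declared above:

---------------------------------------------------------------------------
// One consumer, two settings, both floats delivered together.
clusterSettings.addSettingsUpdateConsumer(
    BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
    BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
    (indexBalance, shardBalance) -> {
        // rebuild whatever depends on both values atomically
    });
---------------------------------------------------------------------------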

View File

@@ -24,10 +24,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;

 import java.util.HashMap;
 import java.util.Map;
@@ -76,37 +77,12 @@ public class AwarenessAllocationDecider extends AllocationDecider {

     public static final String NAME = "awareness";

-    public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes";
-    public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force.";
-
-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null);
-            if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) {
-                awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this
-            }
-            if (awarenessAttributes != null) {
-                logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes);
-                AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes;
-            }
-
-            Map<String, String[]> forcedAwarenessAttributes = new HashMap<>(AwarenessAllocationDecider.this.forcedAwarenessAttributes);
-            Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
-            if (!forceGroups.isEmpty()) {
-                for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
-                    String[] aValues = entry.getValue().getAsArray("values");
-                    if (aValues.length > 0) {
-                        forcedAwarenessAttributes.put(entry.getKey(), aValues);
-                    }
-                }
-            }
-            AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes;
-        }
-    }
+    public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER);
+    public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER);

     private String[] awarenessAttributes;

-    private Map<String, String[]> forcedAwarenessAttributes;
+    private volatile Map<String, String[]> forcedAwarenessAttributes;

     /**
      * Creates a new {@link AwarenessAllocationDecider} instance
@@ -121,24 +97,28 @@ public class AwarenessAllocationDecider extends AllocationDecider {
      * @param settings {@link Settings} to use
      */
     public AwarenessAllocationDecider(Settings settings) {
-        this(settings, new NodeSettingsService(settings));
+        this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
     }

     @Inject
-    public AwarenessAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+    public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES);
-
-        forcedAwarenessAttributes = new HashMap<>();
-        Map<String, Settings> forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP);
-        for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
-            String[] aValues = entry.getValue().getAsArray("values");
-            if (aValues.length > 0) {
-                forcedAwarenessAttributes.put(entry.getKey(), aValues);
-            }
-        }
-
-        nodeSettingsService.addListener(new ApplySettings());
+        this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes);
+        setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings));
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes);
+    }
+
+    private void setForcedAwarenessAttributes(Settings forceSettings) {
+        Map<String, String[]> forcedAwarenessAttributes = new HashMap<>();
+        Map<String, Settings> forceGroups = forceSettings.getAsGroups();
+        for (Map.Entry<String, Settings> entry : forceGroups.entrySet()) {
+            String[] aValues = entry.getValue().getAsArray("values");
+            if (aValues.length > 0) {
+                forcedAwarenessAttributes.put(entry.getKey(), aValues);
+            }
+        }
+        this.forcedAwarenessAttributes = forcedAwarenessAttributes;
     }

     /**
@@ -150,6 +130,10 @@ public class AwarenessAllocationDecider extends AllocationDecider {
         return this.awarenessAttributes;
     }

+    private void setAwarenessAttributes(String[] awarenessAttributes) {
+        this.awarenessAttributes = awarenessAttributes;
+    }
+
     @Override
     public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
         return underCapacity(shardRouting, node, allocation, true);
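
Setting.groupSetting collects every key under a prefix into one Settings object, which the consumer then splits with getAsGroups(). A sketch of what the force-group consumer receives, again assuming the 2.x settingsBuilder entry point:

---------------------------------------------------------------------------
Settings settings = Settings.settingsBuilder()
    .put("cluster.routing.allocation.awareness.force.zone.values", "zone1,zone2")
    .build();
Settings forceSettings = AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings);
Map<String, Settings> groups = forceSettings.getAsGroups();  // keyed by "zone"
String[] values = groups.get("zone").getAsArray("values");   // ["zone1", "zone2"]
---------------------------------------------------------------------------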

View File

@@ -19,13 +19,12 @@

 package org.elasticsearch.cluster.routing.allocation.decider;

-import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.Validator;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;

 import java.util.Locale;
@@ -49,16 +48,7 @@ import java.util.Locale;
 public class ClusterRebalanceAllocationDecider extends AllocationDecider {

     public static final String NAME = "cluster_rebalance";
-
-    public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance";
-    public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> {
-        try {
-            ClusterRebalanceType.parseString(value);
-            return null;
-        } catch (IllegalArgumentException e) {
-            return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]";
-        }
-    };
+    public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER);

     /**
      * An enum representation for the configured re-balance type.
@@ -85,48 +75,28 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) {
                 return ClusterRebalanceType.INDICES_ALL_ACTIVE;
             }
-            throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString);
+            throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString);
         }
     }

-    private ClusterRebalanceType type;
+    private volatile ClusterRebalanceType type;

     @Inject
-    public ClusterRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+    public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active");
         try {
-            type = ClusterRebalanceType.parseString(allowRebalance);
+            type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings);
         } catch (IllegalStateException e) {
-            logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance);
+            logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings));
             type = ClusterRebalanceType.INDICES_ALL_ACTIVE;
         }
-        logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type.toString().toLowerCase(Locale.ROOT));
+        logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT));

-        nodeSettingsService.addListener(new ApplySettings());
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType);
     }

-    class ApplySettings implements NodeSettingsService.Listener {
-
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            String newAllowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, null);
-            if (newAllowRebalance != null) {
-                ClusterRebalanceType newType = null;
-                try {
-                    newType = ClusterRebalanceType.parseString(newAllowRebalance);
-                } catch (IllegalArgumentException e) {
-                    // ignore
-                }
-
-                if (newType != null && newType != ClusterRebalanceAllocationDecider.this.type) {
-                    logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
-                            ClusterRebalanceAllocationDecider.this.type.toString().toLowerCase(Locale.ROOT),
-                            newType.toString().toLowerCase(Locale.ROOT));
-                    ClusterRebalanceAllocationDecider.this.type = newType;
-                }
-            }
-        }
+    private void setType(ClusterRebalanceType type) {
+        this.type = type;
     }

     @Override
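
A Setting can parse into any type; here the enum's own parseString is the parser, so an invalid value is rejected when the update is applied instead of being silently ignored by a listener. Reading the declared default:

---------------------------------------------------------------------------
ClusterRebalanceType type = ClusterRebalanceAllocationDecider
    .CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING
    .get(Settings.EMPTY); // default "indices_all_active" -> INDICES_ALL_ACTIVE
// an unparseable value would throw IllegalArgumentException at update time
---------------------------------------------------------------------------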

View File

@@ -22,8 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider;

 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;

 /**
  * Similar to the {@link ClusterRebalanceAllocationDecider} this
@@ -41,27 +42,19 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {

     public static final String NAME = "concurrent_rebalance";

-    public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance";
-
-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance);
-            if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) {
-                logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance);
-                ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance;
-            }
-        }
-    }
+    public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER);

     private volatile int clusterConcurrentRebalance;

     @Inject
-    public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) {
+    public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2);
+        this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings);
         logger.debug("using [cluster_concurrent_rebalance] with [{}]", clusterConcurrentRebalance);
-        nodeSettingsService.addListener(new ApplySettings());
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance);
+    }
+
+    private void setClusterConcurrentRebalance(int concurrentRebalance) {
+        clusterConcurrentRebalance = concurrentRebalance;
     }

     @Override

View File

@ -22,26 +22,27 @@ package org.elasticsearch.cluster.routing.allocation.decider;
import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.ObjectLookupContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.client.Client; import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.DiskUsage;
import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.RatioValue;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Set; import java.util.Set;
@ -80,53 +81,11 @@ public class DiskThresholdDecider extends AllocationDecider {
private volatile boolean enabled; private volatile boolean enabled;
private volatile TimeValue rerouteInterval; private volatile TimeValue rerouteInterval;
public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled"; public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low"; public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.high"; public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
public static final String CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS = "cluster.routing.allocation.disk.include_relocations"; public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);;
public static final String CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL = "cluster.routing.allocation.disk.reroute_interval"; public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null);
String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null);
Boolean newRelocationsSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null);
Boolean newEnableSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
TimeValue newRerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null);
if (newEnableSetting != null) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED,
DiskThresholdDecider.this.enabled, newEnableSetting);
DiskThresholdDecider.this.enabled = newEnableSetting;
}
if (newRelocationsSetting != null) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS,
DiskThresholdDecider.this.includeRelocations, newRelocationsSetting);
DiskThresholdDecider.this.includeRelocations = newRelocationsSetting;
}
if (newLowWatermark != null) {
if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
throw new ElasticsearchParseException("unable to parse low watermark [{}]", newLowWatermark);
}
logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark);
DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark);
DiskThresholdDecider.this.freeBytesThresholdLow = thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
}
if (newHighWatermark != null) {
if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
throw new ElasticsearchParseException("unable to parse high watermark [{}]", newHighWatermark);
}
logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark);
DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark);
DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
}
if (newRerouteInterval != null) {
logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval);
DiskThresholdDecider.this.rerouteInterval = newRerouteInterval;
}
}
}
/** /**
* Listens for a node to go over the high watermark and kicks off an empty * Listens for a node to go over the high watermark and kicks off an empty
@ -231,38 +190,49 @@ public class DiskThresholdDecider extends AllocationDecider {
// It's okay the Client is null here, because the empty cluster info // It's okay the Client is null here, because the empty cluster info
// service will never actually call the listener where the client is // service will never actually call the listener where the client is
// needed. Also this constructor is only used for tests // needed. Also this constructor is only used for tests
this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null); this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), EmptyClusterInfoService.INSTANCE, null);
} }
@Inject @Inject
public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) { public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings, ClusterInfoService infoService, Client client) {
super(settings); super(settings);
String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%"); final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings);
String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%"); final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings);
setHighWatermark(highWatermark);
if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) { setLowWatermark(lowWatermark);
throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark); this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings);
} this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings);
if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) { this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark);
} clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark);
// Watermark is expressed in terms of used data, but we need "free" data watermark clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations);
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval);
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
this.rerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));
this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
nodeSettingsService.addListener(new ApplySettings());
infoService.addListener(new DiskListener(client)); infoService.addListener(new DiskListener(client));
} }
// For Testing private void setIncludeRelocations(boolean includeRelocations) {
ApplySettings newApplySettings() { this.includeRelocations = includeRelocations;
return new ApplySettings(); }
private void setRerouteInterval(TimeValue rerouteInterval) {
this.rerouteInterval = rerouteInterval;
}
private void setEnabled(boolean enabled) {
this.enabled = enabled;
}
private void setLowWatermark(String lowWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
}
private void setHighWatermark(String highWatermark) {
// Watermark is expressed in terms of used data, but we need "free" data watermark
this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
} }
// For Testing // For Testing
@ -360,7 +330,8 @@ public class DiskThresholdDecider extends AllocationDecider {
} }
// a flag for whether the primary shard has been previously allocated // a flag for whether the primary shard has been previously allocated
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(); IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
// checks for exact byte comparisons // checks for exact byte comparisons
if (freeBytes < freeBytesThresholdLow.bytes()) { if (freeBytes < freeBytesThresholdLow.bytes()) {
@ -580,20 +551,21 @@ public class DiskThresholdDecider extends AllocationDecider {
/** /**
* Checks if a watermark string is a valid percentage or byte size value, * Checks if a watermark string is a valid percentage or byte size value,
* returning true if valid, false if invalid. * @return the watermark value given
*/ */
public boolean validWatermarkSetting(String watermark, String settingName) { public static String validWatermarkSetting(String watermark, String settingName) {
try { try {
RatioValue.parseRatioValue(watermark); RatioValue.parseRatioValue(watermark);
return true;
} catch (ElasticsearchParseException e) { } catch (ElasticsearchParseException e) {
try { try {
ByteSizeValue.parseBytesSizeValue(watermark, settingName); ByteSizeValue.parseBytesSizeValue(watermark, settingName);
return true;
} catch (ElasticsearchParseException ex) { } catch (ElasticsearchParseException ex) {
return false; ex.addSuppressed(e);
throw ex;
} }
} }
return watermark;
} }
private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) { private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
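
Because validWatermarkSetting is now public, static, and returns the validated value (throwing instead of returning false), it can double as a Setting parser, as the two watermark settings above do; a malformed value is rejected when the update is applied instead of being silently ignored. A sketch with a hypothetical key:

---------------------------------------------------------------------------
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.settings.Setting;

public class WatermarkSettingExample {
    // the parser accepts "85%"-style ratios or byte sizes such as "500mb" and throws otherwise
    public static final Setting<String> MY_WATERMARK_SETTING = new Setting<>(
            "my.disk.watermark", "85%",
            (s) -> DiskThresholdDecider.validWatermarkSetting(s, "my.disk.watermark"),
            true, Setting.Scope.CLUSTER);
}
---------------------------------------------------------------------------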

View File

@ -19,18 +19,20 @@
package org.elasticsearch.cluster.routing.allocation.decider; package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Locale; import java.util.Locale;
/** /**
* This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE} / * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} /
* {@link #CLUSTER_ROUTING_REBALANCE_ENABLE} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}.
* The per index setting overrides the cluster wide setting. * The per index setting overrides the cluster wide setting.
* *
* <p> * <p>
@ -54,26 +56,34 @@ import java.util.Locale;
* @see Rebalance * @see Rebalance
* @see Allocation * @see Allocation
*/ */
public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener { public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable"; public static final String NAME = "enable";
public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable"; public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable"; public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable";
public static final String CLUSTER_ROUTING_REBALANCE_ENABLE = "cluster.routing.rebalance.enable"; public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable";
private volatile Rebalance enableRebalance; private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation; private volatile Allocation enableAllocation;
@Inject @Inject
public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings); super(settings);
this.enableAllocation = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.ALL.name())); this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings);
this.enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, Rebalance.ALL.name())); this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings);
nodeSettingsService.addListener(this); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance);
}
public void setEnableRebalance(Rebalance enableRebalance) {
this.enableRebalance = enableRebalance;
}
public void setEnableAllocation(Allocation enableAllocation) {
this.enableAllocation = enableAllocation;
} }
@Override @Override
@ -82,8 +92,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
} }
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE); String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE);
final Allocation enable; final Allocation enable;
if (enableIndexValue != null) { if (enableIndexValue != null) {
enable = Allocation.parse(enableIndexValue); enable = Allocation.parse(enableIndexValue);
@ -96,7 +106,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
case NONE: case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); return allocation.decision(Decision.NO, NAME, "no allocations are allowed");
case NEW_PRIMARIES: case NEW_PRIMARIES:
if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) { if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else { } else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden"); return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden");
@ -148,25 +158,9 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
} }
} }
@Override
public void onRefreshSettings(Settings settings) {
final Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation.name()));
if (enable != this.enableAllocation) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation, enable);
EnableAllocationDecider.this.enableAllocation = enable;
}
final Rebalance enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance.name()));
if (enableRebalance != this.enableRebalance) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance, enableRebalance);
EnableAllocationDecider.this.enableRebalance = enableRebalance;
}
}
/** /**
* Allocation values or rather their string representation to be used with * Allocation values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE}
* via cluster / index settings. * via cluster / index settings.
*/ */
public enum Allocation { public enum Allocation {
@ -192,7 +186,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
/** /**
* Rebalance values or rather their string representation to be used with * Rebalance values or rather their string representation to be used with
* {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE}
* via cluster / index settings. * via cluster / index settings.
*/ */
public enum Rebalance { public enum Rebalance {
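
The same Setting constructor handles enum-valued settings by passing the enum's parse method as the parser, exactly as CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING does with Allocation::parse above. A sketch with a hypothetical enum and key:

---------------------------------------------------------------------------
import java.util.Locale;

import org.elasticsearch.common.settings.Setting;

public class ModeSettingExample {
    public enum Mode {
        ALL, NONE;

        public static Mode parse(String value) {
            return Mode.valueOf(value.toUpperCase(Locale.ROOT)); // tolerate lower-case input
        }
    }

    public static final Setting<Mode> MODE_SETTING =
            new Setting<>("my.feature.mode", Mode.ALL.name(), Mode::parse, true, Setting.Scope.CLUSTER);
}
---------------------------------------------------------------------------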

View File

@ -25,10 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
import java.util.Map;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR;
@ -65,36 +64,23 @@ public class FilterAllocationDecider extends AllocationDecider {
public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include."; public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include.";
public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude."; public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude.";
public static final String CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require."; public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include."; public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude."; public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);
private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterRequireFilters;
private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters;
private volatile DiscoveryNodeFilters clusterExcludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters;
@Inject @Inject
public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings); super(settings);
Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings));
if (requireMap.isEmpty()) { setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings));
clusterRequireFilters = null; setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings));
} else { clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters);
clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters);
} clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters);
Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
if (includeMap.isEmpty()) {
clusterIncludeFilters = null;
} else {
clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
if (excludeMap.isEmpty()) {
clusterExcludeFilters = null;
} else {
clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
}
nodeSettingsService.addListener(new ApplySettings());
} }
@Override @Override
@ -144,21 +130,13 @@ public class FilterAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
} }
class ApplySettings implements NodeSettingsService.Listener { private void setClusterRequireFilters(Settings settings) {
@Override clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap());
public void onRefreshSettings(Settings settings) {
Map<String, String> requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap();
if (!requireMap.isEmpty()) {
clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap);
}
Map<String, String> includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap();
if (!includeMap.isEmpty()) {
clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap);
}
Map<String, String> excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap();
if (!excludeMap.isEmpty()) {
clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap);
} }
private void setClusterIncludeFilters(Settings settings) {
clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
} }
private void setClusterExcludeFilters(Settings settings) {
clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap());
} }
} }
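
groupSetting covers whole families of prefixed keys: the registered consumer receives the Settings subtree under the prefix, which is why the setters above can rebuild their DiscoveryNodeFilters from settings.getAsMap() in a single step. A sketch with a hypothetical prefix:

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class GroupSettingExample {
    // matches every dynamic cluster setting under "my.filters.*"
    public static final Setting<Settings> MY_FILTERS_SETTING =
            Setting.groupSetting("my.filters.", true, Setting.Scope.CLUSTER);

    private volatile Settings filters;

    public GroupSettingExample(Settings settings, ClusterSettings clusterSettings) {
        this.filters = MY_FILTERS_SETTING.get(settings); // the subtree under the prefix
        clusterSettings.addSettingsUpdateConsumer(MY_FILTERS_SETTING, (s) -> this.filters = s);
    }
}
---------------------------------------------------------------------------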

View File

@ -24,16 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
/** /**
* This {@link AllocationDecider} limits the number of shards per node on a per * This {@link AllocationDecider} limits the number of shards per node on a per
* index or node-wide basis. The allocator prevents a single node from holding more * index or node-wide basis. The allocator prevents a single node from holding more
* than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and * than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and
* {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} globally during the allocation * <tt>cluster.routing.allocation.total_shards_per_node</tt> globally during the allocation
* process. The limits of this decider can be changed in real-time via the * process. The limits of this decider can be changed in real-time via the
* index settings API. * index settings API.
* <p> * <p>
@ -64,26 +64,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per node on a global level. * Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited. * Negative values are interpreted as unlimited.
*/ */
public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node"; public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER);
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null);
if (newClusterLimit != null) {
logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE,
ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit);
ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit;
}
}
}
@Inject @Inject
public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings); super(settings);
this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1); this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings);
nodeSettingsService.addListener(new ApplySettings()); clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit);
}
private void setClusterShardLimit(int clusterShardLimit) {
this.clusterShardLimit = clusterShardLimit;
} }
@Override @Override
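
Setting#get(Settings) returns the declared default when the key is absent, and Setting#getKey() replaces the old string constants, so neither the default (-1, meaning unlimited) nor the key needs repeating at call sites. A small sketch against the setting defined above:

---------------------------------------------------------------------------
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Settings;

public class ShardsLimitSettingExample {
    public static void main(String[] args) {
        // key not present in the settings -> declared default of -1 (unlimited)
        int limit = ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(Settings.EMPTY);
        String key = ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey();
        System.out.println(key + " = " + limit);
    }
}
---------------------------------------------------------------------------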

View File

@ -23,9 +23,10 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
/** /**
* This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that
@ -38,18 +39,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
/** /**
* Disables relocation of shards that are currently being snapshotted. * Disables relocation of shards that are currently being snapshotted.
*/ */
public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled"; public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER);
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation);
if (newEnableRelocation != enableRelocation) {
logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation);
enableRelocation = newEnableRelocation;
}
}
}
private volatile boolean enableRelocation = false; private volatile boolean enableRelocation = false;
@ -66,14 +56,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
* @param settings {@link org.elasticsearch.common.settings.Settings} to use * @param settings {@link org.elasticsearch.common.settings.Settings} to use
*/ */
public SnapshotInProgressAllocationDecider(Settings settings) { public SnapshotInProgressAllocationDecider(Settings settings) {
this(settings, new NodeSettingsService(settings)); this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));
} }
@Inject @Inject
public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings); super(settings);
enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings);
nodeSettingsService.addListener(new ApplySettings()); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation);
}
private void setEnableRelocation(boolean enableRelocation) {
this.enableRelocation = enableRelocation;
} }
/** /**

View File

@ -19,13 +19,14 @@
package org.elasticsearch.cluster.routing.allocation.decider; package org.elasticsearch.cluster.routing.allocation.decider;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.node.settings.NodeSettingsService;
/** /**
* {@link ThrottlingAllocationDecider} controls the recovery process per node in * {@link ThrottlingAllocationDecider} controls the recovery process per node in
@ -47,27 +48,33 @@ import org.elasticsearch.node.settings.NodeSettingsService;
*/ */
public class ThrottlingAllocationDecider extends AllocationDecider { public class ThrottlingAllocationDecider extends AllocationDecider {
public static final String NAME = "throttling";
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries";
public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries";
public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries";
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
public static final String NAME = "throttling";
public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries";
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES,Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
private volatile int primariesInitialRecoveries; private volatile int primariesInitialRecoveries;
private volatile int concurrentRecoveries; private volatile int concurrentRecoveries;
@Inject @Inject
public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) {
super(settings); super(settings);
this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings);
this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES); this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings);
this.concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES));
logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries);
clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries);
}
nodeSettingsService.addListener(new ApplySettings()); private void setConcurrentRecoveries(int concurrentRecoveries) {
this.concurrentRecoveries = concurrentRecoveries;
}
private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) {
this.primariesInitialRecoveries = primariesInitialRecoveries;
} }
@Override @Override
@ -115,21 +122,4 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries); return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries);
} }
} }
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries);
if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) {
logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries);
ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries;
}
int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries);
if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) {
logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries);
ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries;
}
}
}
} }
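
The node_concurrent_recoveries setting above demonstrates the Settings-based default supplier: rather than a fixed default string, the default is computed by consulting the legacy cluster.routing.allocation.concurrent_recoveries key, which keeps the old key working as a fallback. The same shape with hypothetical keys:

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Setting;

public class FallbackSettingExample {
    public static final Setting<Integer> NEW_KEY_SETTING = new Setting<>(
            "my.new_key",
            (s) -> s.get("my.legacy_key", "2"),           // default: honor the legacy key if set, else "2"
            (s) -> Setting.parseInt(s, 0, "my.new_key"),  // parser with a lower bound of 0
            true, Setting.Scope.CLUSTER);
}
---------------------------------------------------------------------------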

View File

@ -20,8 +20,19 @@
package org.elasticsearch.cluster.service; package org.elasticsearch.cluster.service;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.AckedClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterState.Builder; import org.elasticsearch.cluster.ClusterState.Builder;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.LocalNodeMasterListener;
import org.elasticsearch.cluster.TimeoutClusterStateListener;
import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData;
@ -38,20 +49,39 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.*; import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.util.*; import java.util.ArrayList;
import java.util.concurrent.*; import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.stream.Collectors; import java.util.stream.Collectors;
@ -62,8 +92,8 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
*/ */
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService { public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
private final ThreadPool threadPool; private final ThreadPool threadPool;
@ -74,7 +104,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
private final TransportService transportService; private final TransportService transportService;
private final NodeSettingsService nodeSettingsService; private final ClusterSettings clusterSettings;
private final DiscoveryNodeService discoveryNodeService; private final DiscoveryNodeService discoveryNodeService;
private final Version version; private final Version version;
@ -107,33 +137,32 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
@Inject @Inject
public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService, public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
NodeSettingsService nodeSettingsService, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) { ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
super(settings); super(settings);
this.operationRouting = operationRouting; this.operationRouting = operationRouting;
this.transportService = transportService; this.transportService = transportService;
this.discoveryService = discoveryService; this.discoveryService = discoveryService;
this.threadPool = threadPool; this.threadPool = threadPool;
this.nodeSettingsService = nodeSettingsService; this.clusterSettings = clusterSettings;
this.discoveryNodeService = discoveryNodeService; this.discoveryNodeService = discoveryNodeService;
this.version = version; this.version = version;
// will be replaced on doStart. // will be replaced on doStart.
this.clusterState = ClusterState.builder(clusterName).build(); this.clusterState = ClusterState.builder(clusterName).build();
this.nodeSettingsService.setClusterService(this); this.clusterSettings.addSettingsUpdateConsumer(CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, this::setSlowTaskLoggingThreshold);
this.nodeSettingsService.addListener(new ApplySettings());
this.reconnectInterval = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL, TimeValue.timeValueSeconds(10)); this.reconnectInterval = CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING.get(settings);
this.slowTaskLoggingThreshold = this.settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, TimeValue.timeValueSeconds(30)); this.slowTaskLoggingThreshold = CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get(settings);
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool); localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock()); initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock());
} }
public NodeSettingsService settingsService() { private void setSlowTaskLoggingThreshold(TimeValue slowTaskLoggingThreshold) {
return this.nodeSettingsService; this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
} }
@Override @Override
@ -292,6 +321,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
if (config.timeout() != null) { if (config.timeout() != null) {
updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> { updateTasksExecutor.execute(updateTask, threadPool.scheduler(), config.timeout(), () -> threadPool.generic().execute(() -> {
if (updateTask.processed.getAndSet(true) == false) { if (updateTask.processed.getAndSet(true) == false) {
logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout());
listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source));
}})); }}));
} else { } else {
@ -327,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
timeInQueue = 0; timeInQueue = 0;
} }
pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new StringText(source), timeInQueue, pending.executing)); pendingClusterTasks.add(new PendingClusterTask(pending.insertionOrder, pending.priority, new Text(source), timeInQueue, pending.executing));
} }
return pendingClusterTasks; return pendingClusterTasks;
} }
@ -413,6 +443,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
assert batchResult.executionResults != null; assert batchResult.executionResults != null;
assert batchResult.executionResults.size() == toExecute.size()
: String.format(Locale.ROOT, "expected [%d] task result%s but was [%d]", toExecute.size(), toExecute.size() == 1 ? "" : "s", batchResult.executionResults.size());
boolean assertsEnabled = false;
assert (assertsEnabled = true);
if (assertsEnabled) {
for (UpdateTask<T> updateTask : toExecute) {
assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]";
}
}
ClusterState newClusterState = batchResult.resultingState; ClusterState newClusterState = batchResult.resultingState;
final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>(); final ArrayList<UpdateTask<T>> proccessedListeners = new ArrayList<>();
@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString(); assert batchResult.executionResults.containsKey(updateTask.task) : "missing " + updateTask.task.toString();
final ClusterStateTaskExecutor.TaskResult executionResult = final ClusterStateTaskExecutor.TaskResult executionResult =
batchResult.executionResults.get(updateTask.task); batchResult.executionResults.get(updateTask.task);
executionResult.handle(() -> proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); executionResult.handle(
() -> proccessedListeners.add(updateTask),
ex -> {
logger.debug("cluster state update task [{}] failed", ex, updateTask.source);
updateTask.listener.onFailure(updateTask.source, ex);
}
);
} }
if (previousClusterState == newClusterState) { if (previousClusterState == newClusterState) {
@ -521,6 +566,15 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
// update the current cluster state // update the current cluster state
clusterState = newClusterState; clusterState = newClusterState;
logger.debug("set local cluster state to version {}", newClusterState.version()); logger.debug("set local cluster state to version {}", newClusterState.version());
try {
// nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency
if (clusterChangedEvent.state().blocks().disableStatePersistence() == false && clusterChangedEvent.metaDataChanged()) {
final Settings incomingSettings = clusterChangedEvent.state().metaData().settings();
clusterSettings.applySettings(incomingSettings);
}
} catch (Exception ex) {
logger.warn("failed to apply cluster settings", ex);
}
for (ClusterStateListener listener : preAppliedListeners) { for (ClusterStateListener listener : preAppliedListeners) {
try { try {
listener.clusterChanged(clusterChangedEvent); listener.clusterChanged(clusterChangedEvent);
@ -560,6 +614,8 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState); task.listener.clusterStateProcessed(task.source, previousClusterState, newClusterState);
} }
executor.clusterStatePublished(newClusterState);
TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS))); TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID()); logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
warnAboutSlowTaskIfNeeded(executionTime, source); warnAboutSlowTaskIfNeeded(executionTime, source);
@ -846,12 +902,4 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
} }
} }
} }
class ApplySettings implements NodeSettingsService.Listener {
@Override
public void onRefreshSettings(Settings settings) {
final TimeValue slowTaskLoggingThreshold = settings.getAsTime(SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, InternalClusterService.this.slowTaskLoggingThreshold);
InternalClusterService.this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
}
}
} }
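
With the listener machinery removed, dynamic updates now travel through the cluster state itself: when the published metadata changes, the service hands the incoming settings to ClusterSettings#applySettings, which validates them and invokes every consumer registered via addSettingsUpdateConsumer. A minimal sketch of that dispatch, using only calls visible in this commit:

---------------------------------------------------------------------------
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

public class ApplySettingsExample {
    public static void main(String[] args) {
        ClusterSettings clusterSettings =
                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        clusterSettings.addSettingsUpdateConsumer(
                InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
                (t) -> System.out.println("slow task logging threshold is now " + t));
        // in production, applyNewClusterState calls:
        //   clusterSettings.applySettings(clusterChangedEvent.state().metaData().settings());
        // which triggers the consumer above if the threshold changed
    }
}
---------------------------------------------------------------------------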

View File

@ -84,7 +84,6 @@ public class Booleans {
* throws exception if string cannot be parsed to boolean * throws exception if string cannot be parsed to boolean
*/ */
public static Boolean parseBooleanExact(String value) { public static Boolean parseBooleanExact(String value) {
boolean isFalse = isExplicitFalse(value); boolean isFalse = isExplicitFalse(value);
if (isFalse) { if (isFalse) {
return false; return false;
@ -94,7 +93,7 @@ public class Booleans {
return true; return true;
} }
throw new IllegalArgumentException("value cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ] "); throw new IllegalArgumentException("Failed to parse value [" + value + "] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]");
} }
public static Boolean parseBoolean(String value, Boolean defaultValue) { public static Boolean parseBoolean(String value, Boolean defaultValue) {
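For context, a minimal sketch of what the sharpened message looks like to a caller; the input value "maybe" is illustrative only.

---------------------------------------------------------------------------
// Hypothetical caller of Booleans.parseBooleanExact; values outside
// true/1/on/yes and false/0/off/no now fail with the offending input
// named in the message.
try {
    Booleans.parseBooleanExact("maybe");
} catch (IllegalArgumentException e) {
    // e.getMessage() now reads:
    // Failed to parse value [maybe] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]
}
---------------------------------------------------------------------------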

View File

@ -0,0 +1,120 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common;
import org.elasticsearch.common.settings.Settings;
import java.lang.reflect.Method;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ThreadLocalRandom;
/**
* Provides factory methods for producing reproducible sources of
* randomness. Reproducible sources of randomness contribute to
* reproducible tests. When running the Elasticsearch test suite, the
* test runner will establish a global random seed accessible via the
* system property "tests.seed". By seeding a random number generator
* with this global seed, we ensure that instances of Random produced
* with this class produce reproducible sources of randomness when
* running under the Elasticsearch test suite. Alternatively, a
* reproducible source of randomness can be produced by providing a
* setting with a reproducible seed. When running the Elasticsearch server
* process, non-reproducible sources of randomness are provided (unless
* a setting is provided for a module that exposes a seed setting (e.g.,
* DiscoveryService#SETTING_DISCOVERY_SEED)).
*/
public final class Randomness {
private static final Method currentMethod;
private static final Method getRandomMethod;
static {
Method maybeCurrentMethod;
Method maybeGetRandomMethod;
try {
Class<?> clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext");
maybeCurrentMethod = clazz.getMethod("current");
maybeGetRandomMethod = clazz.getMethod("getRandom");
} catch (Throwable t) {
maybeCurrentMethod = null;
maybeGetRandomMethod = null;
}
currentMethod = maybeCurrentMethod;
getRandomMethod = maybeGetRandomMethod;
}
private Randomness() {}
/**
* Provides a reproducible source of randomness seeded by a long
* seed read from the settings under the given key.
*
* @param settings the settings containing the seed
* @param setting the key to access the seed
* @return a reproducible source of randomness
*/
public static Random get(Settings settings, String setting) {
Long maybeSeed = settings.getAsLong(setting, null);
if (maybeSeed != null) {
return new Random(maybeSeed);
} else {
return get();
}
}
/**
* Provides a source of randomness that is reproducible when
* running under the Elasticsearch test suite, and otherwise
* produces a non-reproducible source of randomness. Reproducible
* sources of randomness are created when the system property
* "tests.seed" is set and the security policy allows reading this
* system property. Otherwise, non-reproducible sources of
* randomness are created.
*
* @return a source of randomness
* @throws IllegalStateException if tests are running but an
*         instance of Random could not be acquired from
*         RandomizedContext, or if tests are running but
*         tests.seed is not set
*/
public static Random get() {
if (currentMethod != null && getRandomMethod != null) {
try {
Object randomizedContext = currentMethod.invoke(null);
return (Random) getRandomMethod.invoke(randomizedContext);
} catch (ReflectiveOperationException e) {
// unexpected, bail
throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e);
}
} else {
return getWithoutSeed();
}
}
private static Random getWithoutSeed() {
assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random";
return ThreadLocalRandom.current();
}
public static void shuffle(List<?> list) {
Collections.shuffle(list, get());
}
}
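A usage sketch under stated assumptions: `settings` and `shardCopies` are placeholders assumed to exist, and the "discovery.id.seed" key stands in for a module seed setting such as the DiscoveryService#SETTING_DISCOVERY_SEED mentioned in the javadoc.

---------------------------------------------------------------------------
// Minimal sketch; `settings` is an existing Settings instance and the
// key below is a placeholder for a module seed setting.
Random seeded  = Randomness.get(settings, "discovery.id.seed"); // reproducible if the key is set
Random ambient = Randomness.get();   // seeded from tests.seed under the test runner,
                                     // ThreadLocalRandom in production
Randomness.shuffle(shardCopies);     // shuffles with the same source, so reproducible under tests
---------------------------------------------------------------------------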

View File

@ -25,26 +25,17 @@ import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.util.Locale;
import java.util.Objects; import java.util.Objects;
public class EnvelopeBuilder extends ShapeBuilder { public class EnvelopeBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE;
public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder();
protected Coordinate topLeft; private Coordinate topLeft;
protected Coordinate bottomRight; private Coordinate bottomRight;
public EnvelopeBuilder() {
this(Orientation.RIGHT);
}
public EnvelopeBuilder(Orientation orientation) {
super(orientation);
}
public EnvelopeBuilder topLeft(Coordinate topLeft) { public EnvelopeBuilder topLeft(Coordinate topLeft) {
this.topLeft = topLeft; this.topLeft = topLeft;
@ -55,6 +46,10 @@ public class EnvelopeBuilder extends ShapeBuilder {
return topLeft(coordinate(longitude, latitude)); return topLeft(coordinate(longitude, latitude));
} }
public Coordinate topLeft() {
return this.topLeft;
}
public EnvelopeBuilder bottomRight(Coordinate bottomRight) { public EnvelopeBuilder bottomRight(Coordinate bottomRight) {
this.bottomRight = bottomRight; this.bottomRight = bottomRight;
return this; return this;
@ -64,11 +59,14 @@ public class EnvelopeBuilder extends ShapeBuilder {
return bottomRight(coordinate(longitude, latitude)); return bottomRight(coordinate(longitude, latitude));
} }
public Coordinate bottomRight() {
return this.bottomRight;
}
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName()); builder.field(FIELD_TYPE, TYPE.shapeName());
builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES); builder.startArray(FIELD_COORDINATES);
toXContent(builder, topLeft); toXContent(builder, topLeft);
toXContent(builder, bottomRight); toXContent(builder, bottomRight);
@ -88,7 +86,7 @@ public class EnvelopeBuilder extends ShapeBuilder {
@Override @Override
public int hashCode() { public int hashCode() {
return Objects.hash(orientation, topLeft, bottomRight); return Objects.hash(topLeft, bottomRight);
} }
@Override @Override
@ -100,22 +98,19 @@ public class EnvelopeBuilder extends ShapeBuilder {
return false; return false;
} }
EnvelopeBuilder other = (EnvelopeBuilder) obj; EnvelopeBuilder other = (EnvelopeBuilder) obj;
return Objects.equals(orientation, other.orientation) && return Objects.equals(topLeft, other.topLeft) &&
Objects.equals(topLeft, other.topLeft) &&
Objects.equals(bottomRight, other.bottomRight); Objects.equals(bottomRight, other.bottomRight);
} }
@Override @Override
public void writeTo(StreamOutput out) throws IOException { public void writeTo(StreamOutput out) throws IOException {
out.writeBoolean(orientation == Orientation.RIGHT);
writeCoordinateTo(topLeft, out); writeCoordinateTo(topLeft, out);
writeCoordinateTo(bottomRight, out); writeCoordinateTo(bottomRight, out);
} }
@Override @Override
public EnvelopeBuilder readFrom(StreamInput in) throws IOException { public EnvelopeBuilder readFrom(StreamInput in) throws IOException {
Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; return new EnvelopeBuilder()
return new EnvelopeBuilder(orientation)
.topLeft(readCoordinateFrom(in)) .topLeft(readCoordinateFrom(in))
.bottomRight(readCoordinateFrom(in)); .bottomRight(readCoordinateFrom(in));
} }
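Since envelopes no longer carry an orientation, the serialized form shrinks to the two corner coordinates. A hedged round-trip sketch, assuming the usual BytesStreamOutput/StreamInput.wrap test pairing of this era:

---------------------------------------------------------------------------
// Round-trip sketch; readFrom starts from the stateless PROTOTYPE.
EnvelopeBuilder original = ShapeBuilders.newEnvelope()
        .topLeft(-10.0, 10.0)
        .bottomRight(10.0, -10.0);
BytesStreamOutput out = new BytesStreamOutput();
original.writeTo(out);                       // two coordinates, no orientation flag
EnvelopeBuilder copy = EnvelopeBuilder.PROTOTYPE
        .readFrom(StreamInput.wrap(out.bytes()));
assert copy.equals(original);                // equality now ignores orientation too
---------------------------------------------------------------------------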

View File

@ -20,28 +20,26 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.Shape;
import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Objects;
public class GeometryCollectionBuilder extends ShapeBuilder { public class GeometryCollectionBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION;
public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder();
protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>(); protected final ArrayList<ShapeBuilder> shapes = new ArrayList<>();
public GeometryCollectionBuilder() {
this(Orientation.RIGHT);
}
public GeometryCollectionBuilder(Orientation orientation) {
super(orientation);
}
public GeometryCollectionBuilder shape(ShapeBuilder shape) { public GeometryCollectionBuilder shape(ShapeBuilder shape) {
this.shapes.add(shape); this.shapes.add(shape);
return this; return this;
@ -132,4 +130,39 @@ public class GeometryCollectionBuilder extends ShapeBuilder {
//note: ShapeCollection is probably faster than a Multi* geom. //note: ShapeCollection is probably faster than a Multi* geom.
} }
@Override
public int hashCode() {
return Objects.hash(shapes);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj;
return Objects.equals(shapes, other.shapes);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(shapes.size());
for (ShapeBuilder shape : shapes) {
out.writeShape(shape);
}
}
@Override
public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException {
GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder();
int shapes = in.readVInt();
for (int i = 0; i < shapes; i++) {
geometryCollectionBuilder.shape(in.readShape());
}
return geometryCollectionBuilder;
}
} }
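The collection builders all adopt the same length-prefixed layout: a vInt element count followed by one entry per element (raw coordinates for the point collections, nested named writeables here). A sketch of the write side, with ShapeBuilders.newPoint assumed to exist alongside the factory methods shown further down:

---------------------------------------------------------------------------
// Write side of the vInt-prefixed layout used by the collection builders.
GeometryCollectionBuilder collection = ShapeBuilders.newGeometryCollection()
        .shape(ShapeBuilders.newPoint(0, 0))
        .shape(ShapeBuilders.newEnvelope().topLeft(-10, 10).bottomRight(10, -10));
BytesStreamOutput out = new BytesStreamOutput();
collection.writeTo(out);   // vInt(2), then each shape via out.writeShape(...)
---------------------------------------------------------------------------

LineStringBuilder, MultiPointBuilder and MultiLineStringBuilder below follow the same pattern, so a single example stands for all of them.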

View File

@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Objects;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.Geometry;
@ -34,6 +38,8 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; public static final GeoShapeType TYPE = GeoShapeType.LINESTRING;
public static final LineStringBuilder PROTOTYPE = new LineStringBuilder();
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
@ -139,4 +145,39 @@ public class LineStringBuilder extends PointCollection<LineStringBuilder> {
} }
return coordinates; return coordinates;
} }
@Override
public int hashCode() {
return Objects.hash(points);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
LineStringBuilder other = (LineStringBuilder) obj;
return Objects.equals(points, other.points);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(points.size());
for (Coordinate point : points) {
writeCoordinateTo(point, out);
}
}
@Override
public LineStringBuilder readFrom(StreamInput in) throws IOException {
LineStringBuilder lineStringBuilder = new LineStringBuilder();
int size = in.readVInt();
for (int i=0; i < size; i++) {
lineStringBuilder.point(readCoordinateFrom(in));
}
return lineStringBuilder;
}
} }

View File

@ -19,6 +19,8 @@
package org.elasticsearch.common.geo.builders; package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.Shape;
@ -29,11 +31,14 @@ import com.vividsolutions.jts.geom.LineString;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.Objects;
public class MultiLineStringBuilder extends ShapeBuilder { public class MultiLineStringBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING;
public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder();
private final ArrayList<LineStringBuilder> lines = new ArrayList<>(); private final ArrayList<LineStringBuilder> lines = new ArrayList<>();
public MultiLineStringBuilder linestring(LineStringBuilder line) { public MultiLineStringBuilder linestring(LineStringBuilder line) {
@ -41,6 +46,10 @@ public class MultiLineStringBuilder extends ShapeBuilder {
return this; return this;
} }
public MultiLineStringBuilder linestring(Coordinate[] coordinates) {
return this.linestring(new LineStringBuilder().points(coordinates));
}
public Coordinate[][] coordinates() { public Coordinate[][] coordinates() {
Coordinate[][] result = new Coordinate[lines.size()][]; Coordinate[][] result = new Coordinate[lines.size()][];
for (int i = 0; i < result.length; i++) { for (int i = 0; i < result.length; i++) {
@ -92,4 +101,39 @@ public class MultiLineStringBuilder extends ShapeBuilder {
} }
return jtsGeometry(geometry); return jtsGeometry(geometry);
} }
@Override
public int hashCode() {
return Objects.hash(lines);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
MultiLineStringBuilder other = (MultiLineStringBuilder) obj;
return Objects.equals(lines, other.lines);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(lines.size());
for (LineStringBuilder line : lines) {
line.writeTo(out);
}
}
@Override
public MultiLineStringBuilder readFrom(StreamInput in) throws IOException {
MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder();
int size = in.readVInt();
for (int i = 0; i < size; i++) {
multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in));
}
return multiLineStringBuilder;
}
} }

View File

@ -22,18 +22,22 @@ package org.elasticsearch.common.geo.builders;
import com.spatial4j.core.shape.Point; import com.spatial4j.core.shape.Point;
import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.Shape;
import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Coordinate;
import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Objects;
public class MultiPointBuilder extends PointCollection<MultiPointBuilder> { public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT;
public final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder();
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
@ -52,7 +56,7 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
for (Coordinate coord : points) { for (Coordinate coord : points) {
shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y)); shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y));
} }
XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); XShapeCollection<Point> multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
multiPoints.setPointsOnly(true); multiPoints.setPointsOnly(true);
return multiPoints; return multiPoints;
} }
@ -61,4 +65,39 @@ public class MultiPointBuilder extends PointCollection<MultiPointBuilder> {
public GeoShapeType type() { public GeoShapeType type() {
return TYPE; return TYPE;
} }
@Override
public int hashCode() {
return Objects.hash(points);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
MultiPointBuilder other = (MultiPointBuilder) obj;
return Objects.equals(points, other.points);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(points.size());
for (Coordinate point : points) {
writeCoordinateTo(point, out);
}
}
@Override
public MultiPointBuilder readFrom(StreamInput in) throws IOException {
MultiPointBuilder multiPointBuilder = new MultiPointBuilder();
int size = in.readVInt();
for (int i=0; i < size; i++) {
multiPointBuilder.point(readCoordinateFrom(in));
}
return multiPointBuilder;
}
} }

View File

@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.List; import java.util.List;
import java.util.Locale;
import java.util.Objects;
import org.elasticsearch.common.geo.XShapeCollection; import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.Shape;
@ -32,26 +36,50 @@ import com.vividsolutions.jts.geom.Coordinate;
public class MultiPolygonBuilder extends ShapeBuilder { public class MultiPolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON;
public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder();
protected final ArrayList<PolygonBuilder> polygons = new ArrayList<>(); private final ArrayList<PolygonBuilder> polygons = new ArrayList<>();
private Orientation orientation = Orientation.RIGHT;
public MultiPolygonBuilder() { public MultiPolygonBuilder() {
this(Orientation.RIGHT); this(Orientation.RIGHT);
} }
public MultiPolygonBuilder(Orientation orientation) { public MultiPolygonBuilder(Orientation orientation) {
super(orientation); this.orientation = orientation;
} }
public Orientation orientation() {
return this.orientation;
}
/**
* Add a shallow copy of the polygon to the multipolygon. This will apply the orientation of the
* {@link MultiPolygonBuilder} to the polygon if the polygon has a different orientation.
*/
public MultiPolygonBuilder polygon(PolygonBuilder polygon) { public MultiPolygonBuilder polygon(PolygonBuilder polygon) {
this.polygons.add(polygon); PolygonBuilder pb = new PolygonBuilder(this.orientation);
pb.points(polygon.shell().coordinates(false));
for (LineStringBuilder hole : polygon.holes()) {
pb.hole(hole);
}
this.polygons.add(pb);
return this; return this;
} }
/**
* Get the list of polygons.
*/
public ArrayList<PolygonBuilder> polygons() {
return polygons;
}
@Override @Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName()); builder.field(FIELD_TYPE, TYPE.shapeName());
builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES); builder.startArray(FIELD_COORDINATES);
for(PolygonBuilder polygon : polygons) { for(PolygonBuilder polygon : polygons) {
builder.startArray(); builder.startArray();
@ -89,4 +117,41 @@ public class MultiPolygonBuilder extends ShapeBuilder {
return new XShapeCollection<>(shapes, SPATIAL_CONTEXT); return new XShapeCollection<>(shapes, SPATIAL_CONTEXT);
//note: ShapeCollection is probably faster than a Multi* geom. //note: ShapeCollection is probably faster than a Multi* geom.
} }
@Override
public int hashCode() {
return Objects.hash(polygons, orientation);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
MultiPolygonBuilder other = (MultiPolygonBuilder) obj;
return Objects.equals(polygons, other.polygons) &&
Objects.equals(orientation, other.orientation);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
orientation.writeTo(out);
out.writeVInt(polygons.size());
for (PolygonBuilder polygon : polygons) {
polygon.writeTo(out);
}
}
@Override
public MultiPolygonBuilder readFrom(StreamInput in) throws IOException {
MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in));
int holes = in.readVInt();
for (int i = 0; i < holes; i++) {
polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in));
}
return polyBuilder;
}
} }
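The shallow-copy behaviour of polygon() is worth spelling out: the multipolygon's orientation wins over the polygon's own. A small sketch:

---------------------------------------------------------------------------
// The multipolygon's orientation is applied to every polygon added to it.
MultiPolygonBuilder multi = new MultiPolygonBuilder(Orientation.CCW);
PolygonBuilder clockwise = new PolygonBuilder(Orientation.CW);
clockwise.point(0, 0).point(0, 10).point(10, 10).point(10, 0);
multi.polygon(clockwise);  // stored copy is re-built with CCW orientation
assert multi.polygons().get(0).orientation() == Orientation.CCW;
---------------------------------------------------------------------------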

View File

@ -32,7 +32,6 @@ import com.vividsolutions.jts.geom.Coordinate;
public class PointBuilder extends ShapeBuilder { public class PointBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POINT; public static final GeoShapeType TYPE = GeoShapeType.POINT;
public static final PointBuilder PROTOTYPE = new PointBuilder(); public static final PointBuilder PROTOTYPE = new PointBuilder();
private Coordinate coordinate; private Coordinate coordinate;

View File

@ -29,6 +29,8 @@ import com.vividsolutions.jts.geom.MultiPolygon;
import com.vividsolutions.jts.geom.Polygon; import com.vividsolutions.jts.geom.Polygon;
import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -39,6 +41,9 @@ import java.util.HashMap;
import java.util.HashSet; import java.util.HashSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicBoolean;
import java.util.List;
import java.util.Locale;
import java.util.Objects;
/** /**
* The {@link PolygonBuilder} implements the groundwork to create polygons. This contains * The {@link PolygonBuilder} implements the groundwork to create polygons. This contains
@ -48,6 +53,11 @@ import java.util.concurrent.atomic.AtomicBoolean;
public class PolygonBuilder extends ShapeBuilder { public class PolygonBuilder extends ShapeBuilder {
public static final GeoShapeType TYPE = GeoShapeType.POLYGON; public static final GeoShapeType TYPE = GeoShapeType.POLYGON;
public static final PolygonBuilder PROTOTYPE = new PolygonBuilder();
private static final Coordinate[][] EMPTY = new Coordinate[0][];
private Orientation orientation = Orientation.RIGHT;
// line string defining the shell of the polygon // line string defining the shell of the polygon
private LineStringBuilder shell; private LineStringBuilder shell;
@ -56,7 +66,7 @@ public class PolygonBuilder extends ShapeBuilder {
private final ArrayList<LineStringBuilder> holes = new ArrayList<>(); private final ArrayList<LineStringBuilder> holes = new ArrayList<>();
public PolygonBuilder() { public PolygonBuilder() {
this(new ArrayList<Coordinate>(), Orientation.RIGHT); this(Orientation.RIGHT);
} }
public PolygonBuilder(Orientation orientation) { public PolygonBuilder(Orientation orientation) {
@ -64,10 +74,14 @@ public class PolygonBuilder extends ShapeBuilder {
} }
public PolygonBuilder(ArrayList<Coordinate> points, Orientation orientation) { public PolygonBuilder(ArrayList<Coordinate> points, Orientation orientation) {
super(orientation); this.orientation = orientation;
this.shell = new LineStringBuilder().points(points); this.shell = new LineStringBuilder().points(points);
} }
public Orientation orientation() {
return this.orientation;
}
public PolygonBuilder point(double longitude, double latitude) { public PolygonBuilder point(double longitude, double latitude) {
shell.point(longitude, latitude); shell.point(longitude, latitude);
return this; return this;
@ -103,6 +117,20 @@ public class PolygonBuilder extends ShapeBuilder {
return this; return this;
} }
/**
* @return the list of holes defined for this polygon
*/
public List<LineStringBuilder> holes() {
return this.holes;
}
/**
* @return the list of points of the shell for this polygon
*/
public LineStringBuilder shell() {
return this.shell;
}
/** /**
* Close the shell of the polygon * Close the shell of the polygon
*/ */
@ -175,6 +203,7 @@ public class PolygonBuilder extends ShapeBuilder {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(); builder.startObject();
builder.field(FIELD_TYPE, TYPE.shapeName()); builder.field(FIELD_TYPE, TYPE.shapeName());
builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT));
builder.startArray(FIELD_COORDINATES); builder.startArray(FIELD_COORDINATES);
coordinatesArray(builder, params); coordinatesArray(builder, params);
builder.endArray(); builder.endArray();
@ -357,8 +386,6 @@ public class PolygonBuilder extends ShapeBuilder {
return result; return result;
} }
private static final Coordinate[][] EMPTY = new Coordinate[0][];
private static Coordinate[][] holes(Edge[] holes, int numHoles) { private static Coordinate[][] holes(Edge[] holes, int numHoles) {
if (numHoles == 0) { if (numHoles == 0) {
return EMPTY; return EMPTY;
@ -663,4 +690,44 @@ public class PolygonBuilder extends ShapeBuilder {
} }
} }
} }
@Override
public int hashCode() {
return Objects.hash(shell, holes, orientation);
}
@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
PolygonBuilder other = (PolygonBuilder) obj;
return Objects.equals(shell, other.shell) &&
Objects.equals(holes, other.holes) &&
Objects.equals(orientation, other.orientation);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
orientation.writeTo(out);
shell.writeTo(out);
out.writeVInt(holes.size());
for (LineStringBuilder hole : holes) {
hole.writeTo(out);
}
}
@Override
public PolygonBuilder readFrom(StreamInput in) throws IOException {
PolygonBuilder polyBuilder = new PolygonBuilder(Orientation.readFrom(in));
polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in);
int holes = in.readVInt();
for (int i = 0; i < holes; i++) {
polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in));
}
return polyBuilder;
}
} }
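The new stream layout for polygons is the orientation flag, then the shell line string, then a vInt hole count and the holes themselves. For reference, a polygon with one hole built through the accessors added above:

---------------------------------------------------------------------------
// Shell plus one hole; shell() and holes() expose exactly what
// writeTo(out) will emit after the leading orientation flag.
PolygonBuilder donut = new PolygonBuilder(Orientation.RIGHT);
donut.point(-10, -10).point(-10, 10).point(10, 10).point(10, -10);
donut.hole(new LineStringBuilder()
        .point(-5, -5).point(-5, 5).point(5, 5).point(5, -5));
---------------------------------------------------------------------------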

View File

@ -77,16 +77,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
/** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */
protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it.
protected Orientation orientation = Orientation.RIGHT;
protected ShapeBuilder() { protected ShapeBuilder() {
} }
protected ShapeBuilder(Orientation orientation) {
this.orientation = orientation;
}
protected static Coordinate coordinate(double longitude, double latitude) { protected static Coordinate coordinate(double longitude, double latitude) {
return new Coordinate(longitude, latitude); return new Coordinate(longitude, latitude);
} }
@ -186,22 +180,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
return new Coordinate(in.readDouble(), in.readDouble()); return new Coordinate(in.readDouble(), in.readDouble());
} }
public static Orientation orientationFromString(String orientation) {
orientation = orientation.toLowerCase(Locale.ROOT);
switch (orientation) {
case "right":
case "counterclockwise":
case "ccw":
return Orientation.RIGHT;
case "left":
case "clockwise":
case "cw":
return Orientation.LEFT;
default:
throw new IllegalArgumentException("Unknown orientation [" + orientation + "]");
}
}
protected static Coordinate shift(Coordinate coordinate, double dateline) { protected static Coordinate shift(Coordinate coordinate, double dateline) {
if (dateline == 0) { if (dateline == 0) {
return coordinate; return coordinate;
@ -408,6 +386,30 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT; public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT;
public static final Orientation CW = Orientation.LEFT; public static final Orientation CW = Orientation.LEFT;
public static final Orientation CCW = Orientation.RIGHT; public static final Orientation CCW = Orientation.RIGHT;
public void writeTo (StreamOutput out) throws IOException {
out.writeBoolean(this == Orientation.RIGHT);
}
public static Orientation readFrom (StreamInput in) throws IOException {
return in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT;
}
public static Orientation fromString(String orientation) {
orientation = orientation.toLowerCase(Locale.ROOT);
switch (orientation) {
case "right":
case "counterclockwise":
case "ccw":
return Orientation.RIGHT;
case "left":
case "clockwise":
case "cw":
return Orientation.LEFT;
default:
throw new IllegalArgumentException("Unknown orientation [" + orientation + "]");
}
}
} }
public static final String FIELD_TYPE = "type"; public static final String FIELD_TYPE = "type";
@ -498,7 +500,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
radius = Distance.parseDistance(parser.text()); radius = Distance.parseDistance(parser.text());
} else if (FIELD_ORIENTATION.equals(fieldName)) { } else if (FIELD_ORIENTATION.equals(fieldName)) {
parser.nextToken(); parser.nextToken();
requestedOrientation = orientationFromString(parser.text()); requestedOrientation = Orientation.fromString(parser.text());
} else { } else {
parser.nextToken(); parser.nextToken();
parser.skipChildren(); parser.skipChildren();
@ -524,7 +526,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
case POLYGON: return parsePolygon(node, requestedOrientation, coerce); case POLYGON: return parsePolygon(node, requestedOrientation, coerce);
case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce); case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce);
case CIRCLE: return parseCircle(node, radius); case CIRCLE: return parseCircle(node, radius);
case ENVELOPE: return parseEnvelope(node, requestedOrientation); case ENVELOPE: return parseEnvelope(node);
case GEOMETRYCOLLECTION: return geometryCollections; case GEOMETRYCOLLECTION: return geometryCollections;
default: default:
throw new ElasticsearchParseException("shape type [{}] not included", shapeType); throw new ElasticsearchParseException("shape type [{}] not included", shapeType);
@ -550,7 +552,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius); return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius);
} }
protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) { protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) {
// validate the coordinate array for envelope type // validate the coordinate array for envelope type
if (coordinates.children.size() != 2) { if (coordinates.children.size() != 2) {
throw new ElasticsearchParseException("invalid number of points [{}] provided for " + throw new ElasticsearchParseException("invalid number of points [{}] provided for " +
@ -564,7 +566,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y)); uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y));
lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y)); lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y));
} }
return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR); return ShapeBuilders.newEnvelope().topLeft(uL).bottomRight(lR);
} }
protected static void validateMultiPointNode(CoordinateNode coordinates) { protected static void validateMultiPointNode(CoordinateNode coordinates) {
@ -684,8 +686,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
} }
XContentParser.Token token = parser.nextToken(); XContentParser.Token token = parser.nextToken();
GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? Orientation.RIGHT : mapper GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection();
.fieldType().orientation());
while (token != XContentParser.Token.END_ARRAY) { while (token != XContentParser.Token.END_ARRAY) {
ShapeBuilder shapeBuilder = GeoShapeType.parse(parser); ShapeBuilder shapeBuilder = GeoShapeType.parse(parser);
geometryCollection.shape(shapeBuilder); geometryCollection.shape(shapeBuilder);
@ -700,15 +701,4 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri
public String getWriteableName() { public String getWriteableName() {
return type().shapeName(); return type().shapeName();
} }
// NORELEASE this should be deleted as soon as all shape builders implement writable
@Override
public void writeTo(StreamOutput out) throws IOException {
}
// NORELEASE this should be deleted as soon as all shape builders implement writable
@Override
public ShapeBuilder readFrom(StreamInput in) throws IOException {
return null;
}
} }
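With parsing moved onto the enum, the accepted spellings and the one-boolean wire format now live next to each other; a quick reference sketch, taken directly from the hunks above:

---------------------------------------------------------------------------
// Accepted spellings after the move to Orientation.fromString:
assert Orientation.fromString("ccw") == Orientation.RIGHT;  // also "right", "counterclockwise"
assert Orientation.fromString("cw") == Orientation.LEFT;    // also "left", "clockwise"
// Wire format: writeTo emits writeBoolean(this == RIGHT);
// readFrom maps true -> RIGHT, false -> LEFT.
---------------------------------------------------------------------------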

View File

@ -0,0 +1,45 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.geo.builders;
import org.elasticsearch.common.geo.ShapesAvailability;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
/**
* Register the shape builder prototypes with the {@link NamedWriteableRegistry}
*/
public class ShapeBuilderRegistry {
@Inject
public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE);
namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE);
}
}
}
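Construction is enough to populate the registry; the prototypes are then looked up by getWriteableName() when a shape comes off the wire. A minimal sketch (the injection normally happens through Guice, so the direct construction here is illustrative):

---------------------------------------------------------------------------
// Direct construction for illustration; in the node this is injected.
NamedWriteableRegistry registry = new NamedWriteableRegistry();
new ShapeBuilderRegistry(registry);  // registers all nine shape prototypes
// StreamInput#readShape can now resolve e.g. "polygon" to
// PolygonBuilder.PROTOTYPE and delegate to its readFrom(in).
---------------------------------------------------------------------------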

View File

@ -110,15 +110,6 @@ public class ShapeBuilders {
return new GeometryCollectionBuilder(); return new GeometryCollectionBuilder();
} }
/**
* Create a new GeometryCollection
*
* @return a new {@link GeometryCollectionBuilder}
*/
public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) {
return new GeometryCollectionBuilder(orientation);
}
/** /**
* create a new Circle * create a new Circle
* *
@ -136,13 +127,4 @@ public class ShapeBuilders {
public static EnvelopeBuilder newEnvelope() { public static EnvelopeBuilder newEnvelope() {
return new EnvelopeBuilder(); return new EnvelopeBuilder();
} }
/**
* create a new rectangle
*
* @return a new {@link EnvelopeBuilder}
*/
public static EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) {
return new EnvelopeBuilder(orientation);
}
} }

View File

@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream {
if (length == -1) { if (length == -1) {
return null; return null;
} }
return new StringAndBytesText(readBytesReference(length)); return new Text(readBytesReference(length));
} }
public Text readText() throws IOException { public Text readText() throws IOException {
// use StringAndBytes so we can cache the string if its ever converted to it // use StringAndBytes so we can cache the string if its ever converted to it
int length = readInt(); int length = readInt();
return new StringAndBytesText(readBytesReference(length)); return new Text(readBytesReference(length));
} }
@Nullable @Nullable
@ -629,6 +629,13 @@ public abstract class StreamInput extends InputStream {
return readNamedWriteable(QueryBuilder.class); return readNamedWriteable(QueryBuilder.class);
} }
/**
* Reads a {@link ShapeBuilder} from the current stream
*/
public ShapeBuilder readShape() throws IOException {
return readNamedWriteable(ShapeBuilder.class);
}
/** /**
* Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream
*/ */

View File

@ -32,6 +32,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder;
@ -618,6 +619,13 @@ public abstract class StreamOutput extends OutputStream {
writeNamedWriteable(queryBuilder); writeNamedWriteable(queryBuilder);
} }
/**
* Writes a {@link ShapeBuilder} to the current stream
*/
public void writeShape(ShapeBuilder shapeBuilder) throws IOException {
writeNamedWriteable(shapeBuilder);
}
/** /**
* Writes a {@link ScoreFunctionBuilder} to the current stream * Writes a {@link ScoreFunctionBuilder} to the current stream
*/ */
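The two helpers are symmetric; a hedged end-to-end sketch, assuming the reading stream is wired to a NamedWriteableRegistry that holds the shape prototypes:

---------------------------------------------------------------------------
// Write side: the shape goes out as a named writeable.
BytesStreamOutput out = new BytesStreamOutput();
out.writeShape(ShapeBuilders.newPoint(2.0, 51.0));
// Read side (registry-aware stream assumed):
// ShapeBuilder shape = in.readShape();  // resolves to a PointBuilder
---------------------------------------------------------------------------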

View File

@ -149,6 +149,10 @@ public final class AllTermQuery extends Query {
return null; return null;
} }
final TermState state = termStates.get(context.ord); final TermState state = termStates.get(context.ord);
if (state == null) {
// Term does not exist in this segment
return null;
}
termsEnum.seekExact(term.bytes(), state); termsEnum.seekExact(term.bytes(), state);
PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS);
assert docs != null; assert docs != null;

View File

@ -19,21 +19,362 @@
package org.elasticsearch.common.network; package org.elasticsearch.common.network;
import org.elasticsearch.client.support.Headers;
import org.elasticsearch.client.transport.TransportClientNodesService;
import org.elasticsearch.client.transport.support.TransportProxyClient;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.netty.NettyHttpServerTransport;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestHandler;
import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction;
import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction;
import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction;
import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction;
import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction;
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction;
import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction;
import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction;
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction;
import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction;
import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction;
import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction;
import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction;
import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction;
import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction;
import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction;
import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction;
import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction;
import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction;
import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction;
import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction;
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction;
import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction;
import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction;
import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction;
import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction;
import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction;
import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction;
import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction;
import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction;
import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction;
import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction;
import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction;
import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction;
import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction;
import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction;
import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction;
import org.elasticsearch.rest.action.bulk.RestBulkAction;
import org.elasticsearch.rest.action.cat.AbstractCatAction;
import org.elasticsearch.rest.action.cat.RestAliasAction;
import org.elasticsearch.rest.action.cat.RestAllocationAction;
import org.elasticsearch.rest.action.cat.RestCatAction;
import org.elasticsearch.rest.action.cat.RestFielddataAction;
import org.elasticsearch.rest.action.cat.RestHealthAction;
import org.elasticsearch.rest.action.cat.RestIndicesAction;
import org.elasticsearch.rest.action.cat.RestMasterAction;
import org.elasticsearch.rest.action.cat.RestNodeAttrsAction;
import org.elasticsearch.rest.action.cat.RestNodesAction;
import org.elasticsearch.rest.action.cat.RestPluginsAction;
import org.elasticsearch.rest.action.cat.RestRepositoriesAction;
import org.elasticsearch.rest.action.cat.RestSegmentsAction;
import org.elasticsearch.rest.action.cat.RestShardsAction;
import org.elasticsearch.rest.action.cat.RestSnapshotAction;
import org.elasticsearch.rest.action.cat.RestThreadPoolAction;
import org.elasticsearch.rest.action.delete.RestDeleteAction;
import org.elasticsearch.rest.action.explain.RestExplainAction;
import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction;
import org.elasticsearch.rest.action.get.RestGetAction;
import org.elasticsearch.rest.action.get.RestGetSourceAction;
import org.elasticsearch.rest.action.get.RestHeadAction;
import org.elasticsearch.rest.action.get.RestMultiGetAction;
import org.elasticsearch.rest.action.index.RestIndexAction;
import org.elasticsearch.rest.action.main.RestMainAction;
import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction;
import org.elasticsearch.rest.action.percolate.RestPercolateAction;
import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction;
import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction;
import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction;
import org.elasticsearch.rest.action.search.RestClearScrollAction;
import org.elasticsearch.rest.action.search.RestMultiSearchAction;
import org.elasticsearch.rest.action.search.RestSearchAction;
import org.elasticsearch.rest.action.search.RestSearchScrollAction;
import org.elasticsearch.rest.action.suggest.RestSuggestAction;
import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction;
import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction;
import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction;
import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction;
import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction;
import org.elasticsearch.rest.action.update.RestUpdateAction;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;
import org.elasticsearch.transport.netty.NettyTransport;
import java.util.Arrays;
import java.util.List;
/** /**
* * A module to handle registering and binding all network-related classes.
*/ */
public class NetworkModule extends AbstractModule { public class NetworkModule extends AbstractModule {
private final NetworkService networkService; public static final String TRANSPORT_TYPE_KEY = "transport.type";
public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
public NetworkModule(NetworkService networkService) { public static final String LOCAL_TRANSPORT = "local";
public static final String NETTY_TRANSPORT = "netty";
public static final String HTTP_TYPE_KEY = "http.type";
public static final String HTTP_ENABLED = "http.enabled";
private static final List<Class<? extends RestHandler>> builtinRestHandlers = Arrays.asList(
RestMainAction.class,
RestNodesInfoAction.class,
RestNodesStatsAction.class,
RestNodesHotThreadsAction.class,
RestClusterStatsAction.class,
RestClusterStateAction.class,
RestClusterHealthAction.class,
RestClusterUpdateSettingsAction.class,
RestClusterGetSettingsAction.class,
RestClusterRerouteAction.class,
RestClusterSearchShardsAction.class,
RestPendingClusterTasksAction.class,
RestPutRepositoryAction.class,
RestGetRepositoriesAction.class,
RestDeleteRepositoryAction.class,
RestVerifyRepositoryAction.class,
RestGetSnapshotsAction.class,
RestCreateSnapshotAction.class,
RestRestoreSnapshotAction.class,
RestDeleteSnapshotAction.class,
RestSnapshotsStatusAction.class,
RestIndicesExistsAction.class,
RestTypesExistsAction.class,
RestGetIndicesAction.class,
RestIndicesStatsAction.class,
RestIndicesSegmentsAction.class,
RestIndicesShardStoresAction.class,
RestGetAliasesAction.class,
RestAliasesExistAction.class,
RestIndexDeleteAliasesAction.class,
RestIndexPutAliasAction.class,
RestIndicesAliasesAction.class,
RestGetIndicesAliasesAction.class,
RestCreateIndexAction.class,
RestDeleteIndexAction.class,
RestCloseIndexAction.class,
RestOpenIndexAction.class,
RestUpdateSettingsAction.class,
RestGetSettingsAction.class,
RestAnalyzeAction.class,
RestGetIndexTemplateAction.class,
RestPutIndexTemplateAction.class,
RestDeleteIndexTemplateAction.class,
RestHeadIndexTemplateAction.class,
RestPutWarmerAction.class,
RestDeleteWarmerAction.class,
RestGetWarmerAction.class,
RestPutMappingAction.class,
RestGetMappingAction.class,
RestGetFieldMappingAction.class,
RestRefreshAction.class,
RestFlushAction.class,
RestSyncedFlushAction.class,
RestForceMergeAction.class,
RestUpgradeAction.class,
RestClearIndicesCacheAction.class,
RestIndexAction.class,
RestGetAction.class,
RestGetSourceAction.class,
RestHeadAction.class,
RestMultiGetAction.class,
RestDeleteAction.class,
org.elasticsearch.rest.action.count.RestCountAction.class,
RestSuggestAction.class,
RestTermVectorsAction.class,
RestMultiTermVectorsAction.class,
RestBulkAction.class,
RestUpdateAction.class,
RestPercolateAction.class,
RestMultiPercolateAction.class,
RestSearchAction.class,
RestSearchScrollAction.class,
RestClearScrollAction.class,
RestMultiSearchAction.class,
RestRenderSearchTemplateAction.class,
RestValidateQueryAction.class,
RestExplainAction.class,
RestRecoveryAction.class,
// Templates API
RestGetSearchTemplateAction.class,
RestPutSearchTemplateAction.class,
RestDeleteSearchTemplateAction.class,
// Scripts API
RestGetIndexedScriptAction.class,
RestPutIndexedScriptAction.class,
RestDeleteIndexedScriptAction.class,
RestFieldStatsAction.class,
// no abstract cat action
RestCatAction.class
);
private static final List<Class<? extends AbstractCatAction>> builtinCatHandlers = Arrays.asList(
RestAllocationAction.class,
RestShardsAction.class,
RestMasterAction.class,
RestNodesAction.class,
RestIndicesAction.class,
RestSegmentsAction.class,
// Fully qualified to prevent interference with rest.action.count.RestCountAction
org.elasticsearch.rest.action.cat.RestCountAction.class,
// Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction
org.elasticsearch.rest.action.cat.RestRecoveryAction.class,
RestHealthAction.class,
org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class,
RestAliasAction.class,
RestThreadPoolAction.class,
RestPluginsAction.class,
RestFielddataAction.class,
RestNodeAttrsAction.class,
RestRepositoriesAction.class,
RestSnapshotAction.class
);
private final NetworkService networkService;
private final Settings settings;
private final boolean transportClient;
private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
private final ExtensionPoint.ClassSet<RestHandler> restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class);
// we must separate the cat rest handlers so RestCatAction can collect them...
private final ExtensionPoint.ClassSet<AbstractCatAction> catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class);
/**
* Creates a network module that custom networking classes can be plugged into.
*
* @param networkService A constructed network service object to bind.
* @param settings The settings for the node
* @param transportClient True if only transport classes should be allowed to be registered, false otherwise.
*/
public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) {
this.networkService = networkService;
this.settings = settings;
this.transportClient = transportClient;
registerTransportService(NETTY_TRANSPORT, TransportService.class);
registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
registerTransport(NETTY_TRANSPORT, NettyTransport.class);
if (transportClient == false) {
registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class);
for (Class<? extends AbstractCatAction> catAction : builtinCatHandlers) {
catHandlers.registerExtension(catAction);
}
for (Class<? extends RestHandler> restAction : builtinRestHandlers) {
restHandlers.registerExtension(restAction);
}
}
}
/** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
public void registerTransportService(String name, Class<? extends TransportService> clazz) {
transportServiceTypes.registerExtension(name, clazz);
}
/** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
public void registerTransport(String name, Class<? extends Transport> clazz) {
transportTypes.registerExtension(name, clazz);
}
/** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */
// TODO: we need a name other than "http transport"... so confusing with transportClient...
public void registerHttpTransport(String name, Class<? extends HttpServerTransport> clazz) {
if (transportClient) {
throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client");
}
httpTransportTypes.registerExtension(name, clazz);
}
/** Adds an additional rest action. */
// TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here
public void registerRestHandler(Class<? extends RestHandler> clazz) {
if (transportClient) {
throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client");
}
if (AbstractCatAction.class.isAssignableFrom(clazz)) {
catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class));
} else {
restHandlers.registerExtension(clazz);
}
}
@Override
protected void configure() {
bind(NetworkService.class).toInstance(networkService);
bind(NamedWriteableRegistry.class).asEagerSingleton();
transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT);
String defaultTransport = DiscoveryNode.localNode(settings) ? LOCAL_TRANSPORT : NETTY_TRANSPORT;
transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport);
if (transportClient) {
bind(Headers.class).asEagerSingleton();
bind(TransportProxyClient.class).asEagerSingleton();
bind(TransportClientNodesService.class).asEagerSingleton();
} else {
if (settings.getAsBoolean(HTTP_ENABLED, true)) {
bind(HttpServer.class).asEagerSingleton();
httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT);
}
bind(RestController.class).asEagerSingleton();
catHandlers.bind(binder());
restHandlers.bind(binder());
}
}
}
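
For orientation, here is a minimal sketch of how a plugin might hook into the extension points above; the plugin class, the "my-transport" key and the handler classes are hypothetical, and MyTransport/MyRestAction are assumed to exist elsewhere.

---------------------------------------------------------------------------
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.plugins.Plugin;

// Hypothetical plugin wiring a custom Transport and RestHandler into the
// NetworkModule; the names below are illustrative only.
public class MyNetworkPlugin extends Plugin {

    @Override
    public String name() { return "my-network-plugin"; }

    @Override
    public String description() { return "registers a custom transport"; }

    public void onModule(NetworkModule module) {
        // selectable later via the transport type setting
        module.registerTransport("my-transport", MyTransport.class);
        // cat actions are split out automatically by registerRestHandler
        module.registerRestHandler(MyRestAction.class);
    }
}
---------------------------------------------------------------------------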


@ -0,0 +1,252 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.component.AbstractComponent;
import java.util.*;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
/**
* A basic setting service that can be used for per-index and per-cluster settings.
* This service offers transactional application of settings updates.
*/
public abstract class AbstractScopedSettings extends AbstractComponent {
private Settings lastSettingsApplied = Settings.EMPTY;
private final List<SettingUpdater> settingUpdaters = new ArrayList<>();
private final Map<String, Setting<?>> complexMatchers = new HashMap<>();
private final Map<String, Setting<?>> keySettings = new HashMap<>();
private final Setting.Scope scope;
protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
super(settings);
for (Setting<?> entry : settingsSet) {
if (entry.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope());
}
if (entry.hasComplexMatcher()) {
complexMatchers.put(entry.getKey(), entry);
} else {
keySettings.put(entry.getKey(), entry);
}
}
this.scope = scope;
}
public Setting.Scope getScope() {
return this.scope;
}
/**
* Applies the given settings to all listeners and rolls back the result after application. This
* method will not change any settings but will fail if any of the settings can't be applied.
*/
public synchronized Settings dryRun(Settings settings) {
final Settings current = Settings.builder().put(this.settings).put(settings).build();
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
List<RuntimeException> exceptions = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
try {
if (settingUpdater.hasChanged(current, previous)) {
settingUpdater.getValue(current, previous);
}
} catch (RuntimeException ex) {
exceptions.add(ex);
logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater);
}
}
// here we are exhaustive and record all settings that failed.
ExceptionsHelper.rethrowAndSuppress(exceptions);
return current;
}
/**
* Applies the given settings to all the settings consumers or to none of them. The settings
* are merged with the node settings before they are applied; the given settings override
* existing node settings.
* @param newSettings the settings to apply
* @return the unmerged applied settings
*/
public synchronized Settings applySettings(Settings newSettings) {
if (lastSettingsApplied != null && newSettings.equals(lastSettingsApplied)) {
// nothing changed in the settings, ignore
return newSettings;
}
final Settings current = Settings.builder().put(this.settings).put(newSettings).build();
final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build();
try {
List<Runnable> applyRunnables = new ArrayList<>();
for (SettingUpdater settingUpdater : settingUpdaters) {
try {
applyRunnables.add(settingUpdater.updater(current, previous));
} catch (Exception ex) {
logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater);
throw ex;
}
}
for (Runnable settingUpdater : applyRunnables) {
settingUpdater.run();
}
} catch (Exception ex) {
logger.warn("failed to apply settings", ex);
throw ex;
}
return lastSettingsApplied = newSettings;
}
/**
* Adds a settings consumer with a predicate that is only evaluated at update time.
* <p>
* Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
* </p>
* @param validator an additional validator that is only applied to updates of this setting.
* This is useful to add additional validation to settings at runtime compared to at startup time.
*/
public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) {
if (setting != get(setting.getKey())) {
throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]");
}
this.settingUpdaters.add(setting.newUpdater(consumer, logger, validator));
}
/**
* Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change.
* <p>
* Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
* </p>
* This method registers a compound updater that is useful if two settings depend on each other. The consumer is always provided
* with both values even if only one of the two changes.
*/
public synchronized <A, B> void addSettingsUpdateConsumer(Setting<A> a, Setting<B> b, BiConsumer<A, B> consumer) {
if (a != get(a.getKey())) {
throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]");
}
if (b != get(b.getKey())) {
throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]");
}
this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger));
}
/**
* Adds a settings consumer.
* <p>
* Note: Only settings registered in {@link SettingsModule} can be changed dynamically.
* </p>
*/
public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer) {
addSettingsUpdateConsumer(setting, consumer, (s) -> {});
}
/**
* Transactional interface to update settings.
* @see Setting
*/
public interface SettingUpdater<T> {
/**
* Returns true if this updater's setting has changed with the current update
* @param current the current settings
* @param previous the previous settings
* @return true if this updater's setting has changed with the current update
*/
boolean hasChanged(Settings current, Settings previous);
/**
* Returns the instance value for the current settings. This method is stateless and idempotent.
* This method will throw an exception if the source of this value is invalid.
*/
T getValue(Settings current, Settings previous);
/**
* Applies the given value to the updater. This method will actually run the update.
*/
void apply(T value, Settings current, Settings previous);
/**
* Updates this updater's value if it has changed.
* @return <code>true</code> iff the value has been updated.
*/
default boolean apply(Settings current, Settings previous) {
if (hasChanged(current, previous)) {
T value = getValue(current, previous);
apply(value, current, previous);
return true;
}
return false;
}
/**
* Returns a runnable that calls {@link #apply(Object, Settings, Settings)} if the settings
* actually changed. This allows deferring the update to a later point in time while keeping type safety.
* If the value didn't change, the returned runnable is a noop.
*/
default Runnable updater(Settings current, Settings previous) {
if (hasChanged(current, previous)) {
T value = getValue(current, previous);
return () -> apply(value, current, previous);
}
return () -> {};
}
}
/**
* Returns the {@link Setting} for the given key or <code>null</code> if the setting cannot be found.
*/
public Setting<?> get(String key) {
Setting<?> setting = keySettings.get(key);
if (setting == null) {
for (Map.Entry<String, Setting<?>> entry : complexMatchers.entrySet()) {
if (entry.getValue().match(key)) {
return entry.getValue();
}
}
} else {
return setting;
}
return null;
}
/**
* Returns <code>true</code> if the setting for the given key is dynamically updateable. Otherwise <code>false</code>.
*/
public boolean hasDynamicSetting(String key) {
final Setting<?> setting = get(key);
return setting != null && setting.isDynamic();
}
/**
* Returns a settings object that contains all settings that are not
* already set in the given source. The diff contains either the default value for each
* setting or the setting's value in the given default settings.
*/
public Settings diff(Settings source, Settings defaultSettings) {
Settings.Builder builder = Settings.builder();
for (Setting<?> setting : keySettings.values()) {
if (setting.exists(source) == false) {
builder.put(setting.getKey(), setting.getRaw(defaultSettings));
}
}
return builder.build();
}
}
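
A minimal sketch of how registration and notification fit together, assuming a ClusterSettings instance ("clusterSettings") and a logger are in scope and that the setting below, whose key is made up, was registered with it (otherwise addSettingsUpdateConsumer throws):

---------------------------------------------------------------------------
// Illustrative dynamic cluster setting; key and default are made up.
Setting<TimeValue> slowTaskThreshold = Setting.timeSetting(
    "my_plugin.slow_task_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

// The consumer only fires when applySettings(...) observes a changed value;
// the transactional phase validates all updaters before any consumer runs.
clusterSettings.addSettingsUpdateConsumer(slowTaskThreshold,
    threshold -> logger.info("slow task threshold is now [{}]", threshold));
---------------------------------------------------------------------------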


@ -0,0 +1,141 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings;
import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
import org.elasticsearch.action.support.DestructiveOperations;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.*;
import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.indices.ttl.IndicesTTLService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService;
import java.util.*;
/**
* Encapsulates all valid cluster level settings.
*/
public final class ClusterSettings extends AbstractScopedSettings {
public ClusterSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Setting.Scope.CLUSTER);
}
@Override
public synchronized Settings applySettings(Settings newSettings) {
Settings settings = super.applySettings(newSettings);
try {
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
if (entry.getKey().startsWith("logger.")) {
String component = entry.getKey().substring("logger.".length());
if ("_root".equals(component)) {
ESLoggerFactory.getRootLogger().setLevel(entry.getValue());
} else {
ESLoggerFactory.getLogger(component).setLevel(entry.getValue());
}
}
}
} catch (Exception e) {
logger.warn("failed to refresh settings for [{}]", e, "logger");
}
return settings;
}
/**
* Returns <code>true</code> if the setting is a logger setting.
*/
public boolean isLoggerSetting(String key) {
return key.startsWith("logger.");
}
public static final Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING,
BalancedShardsAllocator.THRESHOLD_SETTING,
ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING,
ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING,
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING,
ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING,
FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING,
IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING,
IndicesTTLService.INDICES_TTL_INTERVAL_SETTING,
MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING,
MetaData.SETTING_READ_ONLY_SETTING,
RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING,
RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING,
RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING,
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING,
RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING,
RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING,
ThreadPool.THREADPOOL_GROUP_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING,
ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING,
DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING,
InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING,
SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING,
DestructiveOperations.REQUIRES_NAME_SETTING,
DiscoverySettings.PUBLISH_TIMEOUT_SETTING,
DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING,
DiscoverySettings.COMMIT_TIMEOUT_SETTING,
DiscoverySettings.NO_MASTER_BLOCK_SETTING,
HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING,
SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING,
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING,
TransportService.TRACE_LOG_EXCLUDE_SETTING,
TransportService.TRACE_LOG_INCLUDE_SETTING,
TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING,
ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING,
InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING,
HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING,
HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING,
Transport.TRANSPORT_PROFILES_SETTING,
Transport.TRANSPORT_TCP_COMPRESS)));
}
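
As a sketch of the logger hook above, applying an update whose keys start with "logger." changes log levels on the fly; "_root" addresses the root logger and any other suffix names a component. The component name below is illustrative and "clusterSettings" is an assumed ClusterSettings instance:

---------------------------------------------------------------------------
Settings update = Settings.builder()
    .put("logger._root", "INFO")               // root logger level
    .put("logger.indices.recovery", "DEBUG")   // per-component level
    .build();
clusterSettings.applySettings(update); // levels are set via ESLoggerFactory
---------------------------------------------------------------------------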


@ -0,0 +1,461 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.settings;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.*;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.regex.Pattern;
/**
* A configuration setting with a key, a default value, a parser that converts the string
* representation into a typed value, a dynamic flag, and a scope.
*/
public class Setting<T> extends ToXContentToBytes {
private final String key;
protected final Function<Settings, String> defaultValue;
private final Function<String, T> parser;
private final boolean dynamic;
private final Scope scope;
/**
* Creates a new Setting instance
* @param key the settings key for this setting.
* @param defaultValue a default value function that returns the default values string representation.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting is dynamically updateable
* @param scope the scope of this setting
*/
public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
this.key = key;
this.defaultValue = defaultValue;
this.parser = parser;
this.dynamic = dynamic;
this.scope = scope;
}
/**
* Returns the settings key or a prefix if this setting is a group setting
* @see #isGroupSetting()
*/
public final String getKey() {
return key;
}
/**
* Returns <code>true</code> iff this setting is dynamically updateable, otherwise <code>false</code>
*/
public final boolean isDynamic() {
return dynamic;
}
/**
* Returns the settings scope
*/
public final Scope getScope() {
return scope;
}
/**
* Returns <code>true</code> iff this setting is a group setting. Group settings represent a set of settings
* rather than a single value. The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like <tt>cluster.store.</tt>
* that matches all settings with this prefix.
*/
boolean isGroupSetting() {
return false;
}
boolean hasComplexMatcher() {
return isGroupSetting();
}
/**
* Returns the default value's string representation for this setting.
* @param settings a settings object, used when this setting's default value depends on another setting
*/
public final String getDefault(Settings settings) {
return defaultValue.apply(settings);
}
/**
* Returns <code>true</code> iff this setting is present in the given settings object. Otherwise <code>false</code>
*/
public final boolean exists(Settings settings) {
return settings.get(key) != null;
}
/**
* Returns the settings value. If the setting is not present in the given settings object the default value is returned
* instead.
*/
public T get(Settings settings) {
String value = getRaw(settings);
try {
return parser.apply(value);
} catch (ElasticsearchParseException ex) {
throw new IllegalArgumentException(ex.getMessage(), ex);
} catch (NumberFormatException ex) {
throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", ex);
} catch (IllegalArgumentException ex) {
throw ex;
} catch (Exception t) {
throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t);
}
}
/**
* Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned
* instead. This is useful for accessing the actual string when the value can't be parsed into the typed value.
*/
public String getRaw(Settings settings) {
return settings.get(key, defaultValue.apply(settings));
}
/**
* Returns <code>true</code> iff the given key matches this setting's key or, if this setting
* is a group setting, iff the given key is part of the settings group.
* @see #isGroupSetting()
*/
public boolean match(String toTest) {
return key.equals(toTest);
}
@Override
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("key", key);
builder.field("type", scope.name());
builder.field("dynamic", dynamic);
builder.field("is_group_setting", isGroupSetting());
builder.field("default", defaultValue.apply(Settings.EMPTY));
builder.endObject();
return builder;
}
/**
* The settings scope - settings can either be cluster settings or per index settings.
*/
public enum Scope {
CLUSTER,
INDEX;
}
final AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger) {
return newUpdater(consumer, logger, (s) -> {});
}
AbstractScopedSettings.SettingUpdater<T> newUpdater(Consumer<T> consumer, ESLogger logger, Consumer<T> validator) {
if (isDynamic()) {
return new Updater(consumer, logger, validator);
} else {
throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
}
}
/**
* This is used for settings that depend on each other... see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and its
* usage for details.
*/
static <A, B> AbstractScopedSettings.SettingUpdater<Tuple<A, B>> compoundUpdater(final BiConsumer<A,B> consumer, final Setting<A> aSetting, final Setting<B> bSetting, ESLogger logger) {
final AbstractScopedSettings.SettingUpdater<A> aSettingUpdater = aSetting.newUpdater(null, logger);
final AbstractScopedSettings.SettingUpdater<B> bSettingUpdater = bSetting.newUpdater(null, logger);
return new AbstractScopedSettings.SettingUpdater<Tuple<A, B>>() {
@Override
public boolean hasChanged(Settings current, Settings previous) {
return aSettingUpdater.hasChanged(current, previous) || bSettingUpdater.hasChanged(current, previous);
}
@Override
public Tuple<A, B> getValue(Settings current, Settings previous) {
return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous));
}
@Override
public void apply(Tuple<A, B> value, Settings current, Settings previous) {
consumer.accept(value.v1(), value.v2());
}
@Override
public String toString() {
return "CompoundUpdater for: " + aSettingUpdater + " and " + bSettingUpdater;
}
};
}
private class Updater implements AbstractScopedSettings.SettingUpdater<T> {
private final Consumer<T> consumer;
private final ESLogger logger;
private final Consumer<T> accept;
public Updater(Consumer<T> consumer, ESLogger logger, Consumer<T> accept) {
this.consumer = consumer;
this.logger = logger;
this.accept = accept;
}
@Override
public String toString() {
return "Updater for: " + Setting.this.toString();
}
@Override
public boolean hasChanged(Settings current, Settings previous) {
final String newValue = getRaw(current);
final String value = getRaw(previous);
assert isGroupSetting() == false : "group settings must override this method";
assert value != null : "value was null but can't be unless default is null which is invalid";
return value.equals(newValue) == false;
}
@Override
public T getValue(Settings current, Settings previous) {
final String newValue = getRaw(current);
final String value = getRaw(previous);
T inst = get(current);
try {
accept.accept(inst);
} catch (Exception | AssertionError e) {
throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + newValue + "]", e);
}
return inst;
}
@Override
public void apply(T value, Settings current, Settings previous) {
logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current));
consumer.accept(value);
}
}
public Setting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
this(key, (s) -> defaultValue, parser, dynamic, scope);
}
public static Setting<Float> floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope);
}
public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> {
float value = Float.parseFloat(s);
if (value < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return value;
}, dynamic, scope);
}
public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
}
public static int parseInt(String s, int minValue, String key) {
int value = Integer.parseInt(s);
if (value < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return value;
}
public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
}
public static Setting<Boolean> boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
}
public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
}
public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
Function<String, List<T>> parser = (s) -> {
try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){
XContentParser.Token token = xContentParser.nextToken();
if (token != XContentParser.Token.START_ARRAY) {
throw new IllegalArgumentException("expected START_ARRAY but got " + token);
}
ArrayList<T> list = new ArrayList<>();
while ((token = xContentParser.nextToken()) != XContentParser.Token.END_ARRAY) {
if (token != XContentParser.Token.VALUE_STRING) {
throw new IllegalArgumentException("expected VALUE_STRING but got " + token);
}
list.add(singleValueParser.apply(xContentParser.text()));
}
return list;
} catch (IOException e) {
throw new IllegalArgumentException("failed to parse array", e);
}
};
return new Setting<List<T>>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
@Override
public String getRaw(Settings settings) {
String[] array = settings.getAsArray(key, null);
return array == null ? defaultValue.apply(settings) : arrayToParsableString(array);
}
@Override
public boolean match(String toTest) {
return pattern.matcher(toTest).matches();
}
@Override
boolean hasComplexMatcher() {
return true;
}
};
}
private static String arrayToParsableString(String[] array) {
try {
XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent());
builder.startArray();
for (String element : array) {
builder.value(element);
}
builder.endArray();
return builder.string();
} catch (IOException ex) {
throw new ElasticsearchException(ex);
}
}
public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
if (key.endsWith(".") == false) {
throw new IllegalArgumentException("key must end with a '.'");
}
return new Setting<Settings>(key, "", (s) -> null, dynamic, scope) {
@Override
public boolean isGroupSetting() {
return true;
}
@Override
public Settings get(Settings settings) {
return settings.getByPrefix(key);
}
@Override
public boolean match(String toTest) {
return Regex.simpleMatch(key + "*", toTest);
}
@Override
public AbstractScopedSettings.SettingUpdater<Settings> newUpdater(Consumer<Settings> consumer, ESLogger logger, Consumer<Settings> validator) {
if (isDynamic() == false) {
throw new IllegalStateException("setting [" + getKey() + "] is not dynamic");
}
final Setting<?> setting = this;
return new AbstractScopedSettings.SettingUpdater<Settings>() {
@Override
public boolean hasChanged(Settings current, Settings previous) {
Settings currentSettings = get(current);
Settings previousSettings = get(previous);
return currentSettings.equals(previousSettings) == false;
}
@Override
public Settings getValue(Settings current, Settings previous) {
Settings currentSettings = get(current);
Settings previousSettings = get(previous);
try {
validator.accept(currentSettings);
} catch (Exception | AssertionError e) {
throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e);
}
return currentSettings;
}
@Override
public void apply(Settings value, Settings current, Settings previous) {
consumer.accept(value);
}
@Override
public String toString() {
return "Updater for: " + setting.toString();
}
};
}
};
}
public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, defaultValue, (s) -> {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return timeValue;
}, dynamic, scope);
}
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope);
}
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope);
}
public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
final double d = Double.parseDouble(s);
if (d < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return d;
}, dynamic, scope);
}
}
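
To make the factory methods concrete, a small self-contained sketch of defining and reading settings; the keys, defaults and bounds are illustrative:

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class SettingExamples {
    // A non-dynamic boolean and a dynamic, lower-bounded integer setting.
    static final Setting<Boolean> ENABLED =
        Setting.boolSetting("my_plugin.enabled", true, false, Setting.Scope.CLUSTER);
    static final Setting<Integer> BATCH_SIZE =
        Setting.intSetting("my_plugin.batch_size", 100, 1, true, Setting.Scope.CLUSTER);

    public static void main(String[] args) {
        Settings settings = Settings.builder().put("my_plugin.batch_size", 250).build();
        int batchSize = BATCH_SIZE.get(settings);  // parsed value: 250
        boolean enabled = ENABLED.get(settings);   // key absent, falls back to default: true
        // a value below the minimum of 1 makes BATCH_SIZE.get(...) throw IllegalArgumentException
    }
}
---------------------------------------------------------------------------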


@ -597,6 +597,8 @@ public final class Settings implements ToXContent {
return result.toArray(new String[result.size()]);
}
/**
* Returns group settings for the given setting prefix.
*/
@ -614,6 +616,9 @@ public final class Settings implements ToXContent {
if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') {
settingPrefix = settingPrefix + ".";
}
return getGroupsInternal(settingPrefix, ignoreNonGrouped);
}
private Map<String, Settings> getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException {
// we don't really care that it might happen twice
Map<String, Map<String, String>> map = new LinkedHashMap<>();
for (Object o : settings.keySet()) {
@ -643,6 +648,16 @@ public final class Settings implements ToXContent {
}
return Collections.unmodifiableMap(retVal);
}
/**
* Returns group settings for the given setting prefix.
*/
public Map<String, Settings> getAsGroups() throws SettingsException {
return getAsGroups(false);
}
public Map<String, Settings> getAsGroups(boolean ignoreNonGrouped) throws SettingsException {
return getGroupsInternal("", ignoreNonGrouped);
}
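
As a sketch of the new accessor above, getAsGroups() splits a flat settings object by the first path element; the keys below are illustrative:

---------------------------------------------------------------------------
Settings s = Settings.builder()
    .put("node.name", "node-1")
    .put("node.master", "true")
    .put("cluster.name", "dev")
    .build();
Map<String, Settings> groups = s.getAsGroups();
// groups.get("node")    -> { "name": "node-1", "master": "true" }
// groups.get("cluster") -> { "name": "dev" }
---------------------------------------------------------------------------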
/**
* Returns a parsed version.
@ -706,7 +721,7 @@ public final class Settings implements ToXContent {
Builder builder = new Builder();
int numberOfSettings = in.readVInt();
for (int i = 0; i < numberOfSettings; i++) {
builder.put(in.readString(), in.readOptionalString());
}
return builder.build();
}
@ -715,7 +730,7 @@ public final class Settings implements ToXContent {
out.writeVInt(settings.getAsMap().size());
for (Map.Entry<String, String> entry : settings.getAsMap().entrySet()) {
out.writeString(entry.getKey());
out.writeOptionalString(entry.getValue());
}
}
@ -818,6 +833,10 @@ public final class Settings implements ToXContent {
return this;
}
public Builder putNull(String key) {
return put(key, (String) null);
}
/**
* Sets a setting with the provided setting key and class as value.
*


@ -21,6 +21,10 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.common.inject.AbstractModule;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
/**
* A module that binds the provided settings to the {@link Settings} interface.
*
@ -30,15 +34,36 @@ public class SettingsModule extends AbstractModule {
private final Settings settings;
private final SettingsFilter settingsFilter;
private final Map<String, Setting<?>> clusterDynamicSettings = new HashMap<>();
public SettingsModule(Settings settings, SettingsFilter settingsFilter) {
this.settings = settings;
this.settingsFilter = settingsFilter;
for (Setting<?> setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) {
registerSetting(setting);
}
}
@Override
protected void configure() {
bind(Settings.class).toInstance(settings);
bind(SettingsFilter.class).toInstance(settingsFilter);
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values()));
bind(ClusterSettings.class).toInstance(clusterSettings);
}
public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
if (clusterDynamicSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
clusterDynamicSettings.put(setting.getKey(), setting);
break;
case INDEX:
throw new UnsupportedOperationException("not yet implemented");
}
}
}
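
For illustration, a hypothetical plugin registering its own dynamic cluster setting through this module; the class, key and default are made up, registerSetting rejects duplicate keys, and INDEX scope is not yet supported:

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.plugins.Plugin;

public class MyPlugin extends Plugin {

    public static final Setting<Double> RATIO = Setting.doubleSetting(
        "my_plugin.ratio", 0.5d, 0.0d, true, Setting.Scope.CLUSTER);

    @Override
    public String name() { return "my-plugin"; }

    @Override
    public String description() { return "registers a custom dynamic setting"; }

    public void onModule(SettingsModule module) {
        module.registerSetting(RATIO); // throws if the key is registered twice
    }
}
---------------------------------------------------------------------------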


@ -103,9 +103,9 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
} else if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
serializeValue(settings, sb, path, parser, currentFieldName, true);
} else {
serializeValue(settings, sb, path, parser, currentFieldName, false);
}
}
@ -126,31 +126,33 @@ public abstract class XContentSettingsLoader implements SettingsLoader {
} else if (token == XContentParser.Token.FIELD_NAME) {
fieldName = parser.currentName();
} else if (token == XContentParser.Token.VALUE_NULL) {
serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), true);
} else {
serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), false);
}
}
}
private void serializeValue(Map<String, String> settings, StringBuilder sb, List<String> path, XContentParser parser, String fieldName, boolean isNull) throws IOException {
sb.setLength(0);
for (String pathEle : path) {
sb.append(pathEle).append('.');
}
sb.append(fieldName);
String key = sb.toString();
String currentValue = isNull ? null : parser.text();
if (settings.containsKey(key)) {
throw new ElasticsearchParseException(
"duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]",
key,
parser.getTokenLocation().lineNumber,
parser.getTokenLocation().columnNumber,
settings.get(key),
currentValue
);
}
settings.put(key, currentValue);
}
}
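
Taken together with the Settings changes above (putNull, writeOptionalString and the new VALUE_NULL branches), an explicit null value now survives loading and serialization instead of being silently dropped. A small sketch with illustrative keys:

---------------------------------------------------------------------------
// The null-valued key is preserved both when parsed from JSON/YAML source
// and when the settings are written to and read from a stream.
Settings settings = Settings.builder()
    .put("index.refresh_interval", "1s")
    .putNull("index.translog.sync_interval") // key kept, value null
    .build();
---------------------------------------------------------------------------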


@ -1,82 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesReference;
/**
* A {@link BytesReference} representation of the text that always converts to a {@link String} on the fly.
*/
public class BytesText implements Text {
private BytesReference bytes;
private int hash;
public BytesText(BytesReference bytes) {
this.bytes = bytes;
}
@Override
public boolean hasBytes() {
return true;
}
@Override
public BytesReference bytes() {
return bytes;
}
@Override
public boolean hasString() {
return false;
}
@Override
public String string() {
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
if (hash == 0) {
hash = bytes.hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}


@ -1,111 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
/**
* Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if
* the other is requested, caches it in a local reference so no additional conversion will be needed.
*/
public class StringAndBytesText implements Text {
public static final Text[] EMPTY_ARRAY = new Text[0];
public static Text[] convertFromStringArray(String[] strings) {
if (strings.length == 0) {
return EMPTY_ARRAY;
}
Text[] texts = new Text[strings.length];
for (int i = 0; i < strings.length; i++) {
texts[i] = new StringAndBytesText(strings[i]);
}
return texts;
}
private BytesReference bytes;
private String text;
private int hash;
public StringAndBytesText(BytesReference bytes) {
this.bytes = bytes;
}
public StringAndBytesText(String text) {
this.text = text;
}
@Override
public boolean hasBytes() {
return bytes != null;
}
@Override
public BytesReference bytes() {
if (bytes == null) {
bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8));
}
return bytes;
}
@Override
public boolean hasString() {
return text != null;
}
@Override
public String string() {
// TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil
if (text == null) {
if (!bytes.hasArray()) {
bytes = bytes.toBytesArray();
}
text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8);
}
return text;
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
if (hash == 0) {
hash = bytes().hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}


@ -1,94 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.text;
import java.nio.charset.StandardCharsets;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
/**
* A {@link String}-only representation of the text. Will always convert to bytes on the fly.
*/
public class StringText implements Text {
public static final Text[] EMPTY_ARRAY = new Text[0];
public static Text[] convertFromStringArray(String[] strings) {
if (strings.length == 0) {
return EMPTY_ARRAY;
}
Text[] texts = new Text[strings.length];
for (int i = 0; i < strings.length; i++) {
texts[i] = new StringText(strings[i]);
}
return texts;
}
private final String text;
private int hash;
public StringText(String text) {
this.text = text;
}
@Override
public boolean hasBytes() {
return false;
}
@Override
public BytesReference bytes() {
return new BytesArray(text.getBytes(StandardCharsets.UTF_8));
}
@Override
public boolean hasString() {
return true;
}
@Override
public String string() {
return text;
}
@Override
public String toString() {
return string();
}
@Override
public int hashCode() {
// we use bytes here so we can be consistent with other text implementations
if (hash == 0) {
hash = bytes().hashCode();
}
return hash;
}
@Override
public boolean equals(Object obj) {
// we use bytes here so we can be consistent with other text implementations
return bytes().equals(((Text) obj).bytes());
}
@Override
public int compareTo(Text text) {
return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes());
}
}

Some files were not shown because too many files have changed in this diff.