diff --git a/.gitignore b/.gitignore index 31f2aa5fc66..add7a22e840 100644 --- a/.gitignore +++ b/.gitignore @@ -1,37 +1,42 @@ + +# intellij files .idea/ -.gradle/ *.iml *.ipr *.iws -work/ -/data/ -logs/ -.DS_Store -build/ -generated-resources/ -**/.local* -docs/html/ -docs/build.log -/tmp/ -backwards/ -html_docs -.vagrant/ -## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects) -## All files (.project, .classpath, .settings/*) should be generated through Maven which -## will correctly set the classpath based on the declared dependencies and write settings -## files to ensure common coding style across Eclipse and IDEA. +# eclipse files .project .classpath eclipse-build .settings -## netbeans ignores +# netbeans files nb-configuration.xml nbactions.xml -dependency-reduced-pom.xml +# gradle stuff +.gradle/ +build/ +generated-resources/ -# old patterns specific to maven +# maven stuff (to be removed when trunk becomes 4.x) *-execution-hints.log target/ +dependency-reduced-pom.xml + +# testing stuff +**/.local* +.vagrant/ + +# osx stuff +.DS_Store + +# needed in case docs build is run...maybe we can configure doc build to generate files under build? +html_docs + +# random old stuff that we should look at the necessity of... +/tmp/ +backwards/ + + diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 569c16b0747..fef23d0cd3d 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -290,14 +290,14 @@ The REST tests are run automatically when executing the "gradle check" command. REST tests use the following command: --------------------------------------------------------------------------- -gradle :distribution:tar:integTest \ +gradle :distribution:integ-test-zip:integTest \ -Dtests.class=org.elasticsearch.test.rest.RestIT --------------------------------------------------------------------------- A specific test case can be run with --------------------------------------------------------------------------- -gradle :distribution:tar:integTest \ +gradle :distribution:integ-test-zip:integTest \ -Dtests.class=org.elasticsearch.test.rest.RestIT \ -Dtests.method="test {p0=cat.shards/10_basic/Help}" --------------------------------------------------------------------------- diff --git a/build.gradle b/build.gradle index 878f7a9a915..c31fe88f5d2 100644 --- a/build.gradle +++ b/build.gradle @@ -97,6 +97,7 @@ subprojects { // the "value" -quiet is added, separated by a space. This is ok since the javadoc // command already adds -quiet, so we are just duplicating it // see https://discuss.gradle.org/t/add-custom-javadoc-option-that-does-not-take-an-argument/5959 + javadoc.options.encoding='UTF8' javadoc.options.addStringOption('Xdoclint:all,-missing', '-quiet') } } @@ -108,7 +109,7 @@ subprojects { ext.projectSubstitutions = [ "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':core', - "org.elasticsearch:test-framework:${version}": ':test-framework', + "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', "org.elasticsearch.distribution.tar:elasticsearch:${version}": ':distribution:tar', @@ -130,8 +131,8 @@ subprojects { // the dependency is added. 
gradle.projectsEvaluated {
  allprojects {
-    if (project.path == ':test-framework') {
-      // :test-framework:test cannot run before and after :core:test
+    if (project.path == ':test:framework') {
+      // :test:framework:test cannot run before and after :core:test
       return
     }
     configurations.all {
@@ -168,6 +169,30 @@ gradle.projectsEvaluated {
 // intellij configuration
 allprojects {
   apply plugin: 'idea'
+
+  idea {
+    module {
+      // same as for the IntelliJ Gradle tooling integration
+      inheritOutputDirs = false
+      outputDir = file('build/classes/main')
+      testOutputDir = file('build/classes/test')
+
+      iml {
+        // fix so that Gradle idea plugin properly generates support for resource folders
+        // see also https://issues.gradle.org/browse/GRADLE-2975
+        withXml {
+          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/main/resources' }.each {
+            it.attributes().remove('isTestSource')
+            it.attributes().put('type', 'java-resource')
+          }
+          it.asNode().component.content.sourceFolder.findAll { it.@url == 'file://$MODULE_DIR$/src/test/resources' }.each {
+            it.attributes().remove('isTestSource')
+            it.attributes().put('type', 'java-test-resource')
+          }
+        }
+      }
+    }
+  }
 }

 idea {
diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle
index e46f9cb33c0..a0f06343d30 100644
--- a/buildSrc/build.gradle
+++ b/buildSrc/build.gradle
@@ -63,6 +63,7 @@ dependencies {
   compile 'com.perforce:p4java:2012.3.551082' // THIS IS SUPPOSED TO BE OPTIONAL IN THE FUTURE....
   compile 'de.thetaphi:forbiddenapis:2.0'
   compile 'com.bmuschko:gradle-nexus-plugin:2.3.1'
+  compile 'org.apache.rat:apache-rat:0.11'
 }

 processResources {
diff --git a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy
index 33e16feb44a..ccb5d5904bf 100644
--- a/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy
+++ b/buildSrc/src/main/groovy/com/carrotsearch/gradle/junit4/RandomizedTestingTask.groovy
@@ -78,7 +78,7 @@ class RandomizedTestingTask extends DefaultTask {
   @Input
   String argLine = null

-  Map<String, String> systemProperties = new HashMap<>()
+  Map<String, Object> systemProperties = new HashMap<>()
   PatternFilterable patternSet = new PatternSet()

   RandomizedTestingTask() {
@@ -100,7 +100,7 @@ class RandomizedTestingTask extends DefaultTask {
     jvmArgs.add(argument)
   }

-  void systemProperty(String property, String value) {
+  void systemProperty(String property, Object value) {
     systemProperties.put(property, value)
   }

@@ -245,8 +245,8 @@ class RandomizedTestingTask extends DefaultTask {
         exclude(name: excludePattern)
       }
     }
-    for (Map.Entry<String, String> prop : systemProperties) {
-      sysproperty key: prop.getKey(), value: prop.getValue()
+    for (Map.Entry<String, Object> prop : systemProperties) {
+      sysproperty key: prop.getKey(), value: prop.getValue().toString()
     }
     makeListeners()
   }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy
new file mode 100644
index 00000000000..b713c00ed8d
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/AntTask.groovy
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gradle
+
+import org.apache.tools.ant.BuildException
+import org.apache.tools.ant.BuildListener
+import org.apache.tools.ant.BuildLogger
+import org.apache.tools.ant.DefaultLogger
+import org.apache.tools.ant.Project
+import org.gradle.api.DefaultTask
+import org.gradle.api.GradleException
+import org.gradle.api.tasks.Input
+import org.gradle.api.tasks.Optional
+import org.gradle.api.tasks.TaskAction
+
+import java.nio.charset.Charset
+
+/**
+ * A task which will run ant commands.
+ *
+ * Logging for the task is customizable for subclasses by overriding makeLogger.
+ */
+public class AntTask extends DefaultTask {
+
+    /**
+     * A buffer that will contain the output of the ant code run,
+     * if the output was not already written directly to stdout.
+     */
+    public final ByteArrayOutputStream outputBuffer = new ByteArrayOutputStream()
+
+    /** The closure to run as the body of this task. Subclasses may override runAnt instead of setting this. */
+    @Input
+    @Optional
+    Closure doAnt
+
+    @TaskAction
+    final void executeTask() {
+        AntBuilder ant = new AntBuilder()
+
+        // remove existing loggers, we add our own
+        List<BuildLogger> toRemove = new ArrayList<>();
+        for (BuildListener listener : ant.project.getBuildListeners()) {
+            if (listener instanceof BuildLogger) {
+                toRemove.add(listener);
+            }
+        }
+        for (BuildLogger listener : toRemove) {
+            ant.project.removeBuildListener(listener)
+        }
+
+        final int outputLevel = logger.isDebugEnabled() ? Project.MSG_DEBUG : (logger.isInfoEnabled() ? Project.MSG_INFO : Project.MSG_WARN)
+        final PrintStream stream = useStdout() ? System.out : new PrintStream(outputBuffer, true, Charset.defaultCharset().name())
+        BuildLogger antLogger = makeLogger(stream, outputLevel)
+
+        ant.project.addBuildListener(antLogger)
+        try {
+            runAnt(ant)
+        } catch (BuildException e) {
+            // ant failed, so see if we have buffered output to emit, then rethrow the failure
+            String buffer = outputBuffer.toString()
+            if (buffer.isEmpty() == false) {
+                logger.error("=== Ant output ===\n${buffer}")
+            }
+            throw e
+        }
+    }
+
+    /** Runs the doAnt closure. This can be overridden by subclasses instead of having to set a closure. */
+    protected void runAnt(AntBuilder ant) {
+        if (doAnt == null) {
+            throw new GradleException("Missing doAnt for ${name}")
+        }
+        doAnt(ant)
+    }
+
+    /** Create the logger the ant runner will use, with the given stream for error/output. */
+    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
+        return new DefaultLogger(
+            errorPrintStream: stream,
+            outputPrintStream: stream,
+            messageOutputLevel: outputLevel)
+    }
+
+    /**
+     * Returns true if the ant logger should write to stdout, or false if to the buffer.
+     * The default implementation writes to the buffer when gradle info logging is disabled.
+ */ + protected boolean useStdout() { + return logger.isInfoEnabled() + } + + +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index c4d0ced6b5c..10f479ee100 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -202,7 +202,7 @@ class BuildPlugin implements Plugin { // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself Closure disableTransitiveDeps = { ModuleDependency dep -> - if (!(dep instanceof ProjectDependency) && dep.getGroup() != 'org.elasticsearch') { + if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) { dep.transitive = false // also create a configuration just for this dependency version, so that later @@ -302,6 +302,7 @@ class BuildPlugin implements Plugin { options.compilerArgs << '-profile' << project.compactProfile } options.encoding = 'UTF-8' + //options.incremental = true } } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 0d936ab0e15..9a000ab3296 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -60,7 +60,7 @@ public class PluginBuildPlugin extends BuildPlugin { private static void configureDependencies(Project project) { project.dependencies { provided "org.elasticsearch:elasticsearch:${project.versions.elasticsearch}" - testCompile "org.elasticsearch:test-framework:${project.versions.elasticsearch}" + testCompile "org.elasticsearch.test:framework:${project.versions.elasticsearch}" // we "upgrade" these optional deps to provided for plugins, since they will run // with a full elasticsearch server that includes optional deps provided "com.spatial4j:spatial4j:${project.versions.spatial4j}" diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy index 67aa26c28ad..7d8982e3f2d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ForbiddenPatternsTask.groovy @@ -62,11 +62,14 @@ public class ForbiddenPatternsTask extends DefaultTask { patterns.put('nocommit', /nocommit/) patterns.put('tab', /\t/) patterns.put('wildcard imports', /^\s*import.*\.\*/) + + inputs.property("excludes", filesFilter.excludes) + inputs.property("rules", patterns) } /** Adds a file glob pattern to be excluded */ public void exclude(String... excludes) { - this.filesFilter.exclude(excludes) + filesFilter.exclude(excludes) } /** Adds a pattern to forbid. T */ diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy new file mode 100644 index 00000000000..39cf55c905b --- /dev/null +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/LicenseHeadersTask.groovy @@ -0,0 +1,122 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.precommit
+
+import org.apache.rat.anttasks.Report
+import org.apache.rat.anttasks.SubstringLicenseMatcher
+import org.apache.rat.license.SimpleLicenseFamily
+import org.elasticsearch.gradle.AntTask
+import org.gradle.api.tasks.SourceSet
+
+import java.nio.file.Files
+
+/**
+ * Checks files for license headers.
+ * <p>
+ * This is a port of the apache lucene check
+ */
+public class LicenseHeadersTask extends AntTask {
+
+    LicenseHeadersTask() {
+        description = "Checks sources for missing, incorrect, or unacceptable license headers"
+    }
+
+    @Override
+    protected void runAnt(AntBuilder ant) {
+        ant.project.addTaskDefinition('ratReport', Report)
+        ant.project.addDataTypeDefinition('substringMatcher', SubstringLicenseMatcher)
+        ant.project.addDataTypeDefinition('approvedLicense', SimpleLicenseFamily)
+
+        // create a file for the log to go to under reports/
+        File reportDir = new File(project.buildDir, "reports/licenseHeaders")
+        reportDir.mkdirs()
+        File reportFile = new File(reportDir, "rat.log")
+        Files.deleteIfExists(reportFile.toPath())
+
+        // run rat, going to the file
+        ant.ratReport(reportFile: reportFile.absolutePath, addDefaultLicenseMatchers: true) {
+            // checks all the java sources (allJava)
+            for (SourceSet set : project.sourceSets) {
+                for (File dir : set.allJava.srcDirs) {
+                    // sometimes these dirs don't exist, e.g. site-plugin has no actual java src/main...
+                    if (dir.exists()) {
+                        ant.fileset(dir: dir)
+                    }
+                }
+            }
+
+            // BSD 4-clause stuff (is disallowed below)
+            // we keep this here in case someone adds BSD code for some reason; it should never be allowed.
+            substringMatcher(licenseFamilyCategory: "BSD4 ",
+                             licenseFamilyName: "Original BSD License (with advertising clause)") {
+                pattern(substring: "All advertising materials")
+            }
+
+            // Apache
+            substringMatcher(licenseFamilyCategory: "AL   ",
+                             licenseFamilyName: "Apache") {
+                // Apache license (ES)
+                pattern(substring: "Licensed to Elasticsearch under one or more contributor")
+                // Apache license (ASF)
+                pattern(substring: "Licensed to the Apache Software Foundation (ASF) under")
+                // this is the old-school one under some files
+                pattern(substring: "Licensed under the Apache License, Version 2.0 (the \"License\")")
+            }
+
+            // Generated resources
+            substringMatcher(licenseFamilyCategory: "GEN  ",
+                             licenseFamilyName: "Generated") {
+                // parsers generated by antlr
+                pattern(substring: "ANTLR GENERATED CODE")
+            }
+
+            // approved categories
+            approvedLicense(familyName: "Apache")
+            approvedLicense(familyName: "Generated")
+        }
+
+        // check the license file for any errors; this should be fast.
+        boolean zeroUnknownLicenses = false
+        boolean foundProblemsWithFiles = false
+        reportFile.eachLine('UTF-8') { line ->
+            if (line.startsWith("0 Unknown Licenses")) {
+                zeroUnknownLicenses = true
+            }
+
+            if (line.startsWith(" !")) {
+                foundProblemsWithFiles = true
+            }
+        }
+
+        if (zeroUnknownLicenses == false || foundProblemsWithFiles) {
+            // print the unapproved license section, usually it's all you need to fix problems.
+            int sectionNumber = 0
+            reportFile.eachLine('UTF-8') { line ->
+                if (line.startsWith("*******************************")) {
+                    sectionNumber++
+                } else {
+                    if (sectionNumber == 2) {
+                        logger.error(line)
+                    }
+                }
+            }
+            throw new IllegalStateException("License header problems were found! Full details: " + reportFile.absolutePath)
+        }
+    }
+}
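
The failure path above is driven entirely by Apache Rat's plain-text report: the run is clean only if the summary contains a line starting with `0 Unknown Licenses`, any line starting with ` !` marks an unapproved file, and section 2 of the report (the blocks are delimited by rows of asterisks) is what gets echoed to the error log. A rough, hypothetical rat.log excerpt (paths and counts invented for illustration) shows the shape the parser expects:

---------------------------------------------------------------------------
*******************************
Summary
-------
...
2 Unknown Licenses
*******************************
Unapproved licenses:
 ! /work/core/src/main/java/org/example/Foo.java
 ! /work/core/src/test/java/org/example/FooTests.java
*******************************
---------------------------------------------------------------------------
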
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
index 04878d979e9..f99032e1e2d 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy
@@ -34,7 +34,9 @@ class PrecommitTasks {
         List<Task> precommitTasks = [
             configureForbiddenApis(project),
             project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
-            project.tasks.create('jarHell', JarHellTask.class)]
+            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
+            project.tasks.create('jarHell', JarHellTask.class),
+            project.tasks.create('thirdPartyAudit', ThirdPartyAuditTask.class)]

         // tasks with just tests don't need dependency licenses, so this flag makes adding
         // the task optional
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
new file mode 100644
index 00000000000..2ee4c29d614
--- /dev/null
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/ThirdPartyAuditTask.groovy
@@ -0,0 +1,205 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.gradle.precommit
+
+import org.apache.tools.ant.BuildLogger
+import org.apache.tools.ant.DefaultLogger
+import org.apache.tools.ant.Project
+import org.elasticsearch.gradle.AntTask
+import org.gradle.api.artifacts.Configuration
+import org.gradle.api.file.FileCollection
+
+import java.nio.file.FileVisitResult
+import java.nio.file.Files
+import java.nio.file.Path
+import java.nio.file.SimpleFileVisitor
+import java.nio.file.attribute.BasicFileAttributes
+
+/**
+ * Basic static checking to keep tabs on third party JARs
+ */
+public class ThirdPartyAuditTask extends AntTask {
+
+    // true to be lenient about MISSING CLASSES
+    private boolean missingClasses;
+
+    // patterns for classes to exclude, because we understand their issues
+    private String[] excludes = new String[0];
+
+    ThirdPartyAuditTask() {
+        dependsOn(project.configurations.testCompile)
+        description = "Checks third party JAR bytecode for missing classes, use of internal APIs, and other horrors"
+    }
+
+    /**
+     * Set to true to be lenient with missing classes. By default this check will fail if it finds
+     * MISSING CLASSES. This means the set of jars is incomplete. However, in some cases
+     * this can be due to intentional exclusions that are well-tested and understood.
+     */
+    public void setMissingClasses(boolean value) {
+        missingClasses = value;
+    }
+
+    /**
+     * Returns true if leniency about missing classes is enabled.
+     */
+    public boolean isMissingClasses() {
+        return missingClasses;
+    }
+
+    /**
+     * classes that should be excluded from the scan,
+     * e.g. because we know what sheisty stuff those particular classes are up to.
+     */
+    public void setExcludes(String[] classes) {
+        for (String s : classes) {
+            if (s.indexOf('*') != -1) {
+                throw new IllegalArgumentException("illegal third party audit exclusion: '" + s + "', wildcards are not permitted!")
+            }
+        }
+        excludes = classes;
+    }
+
+    /**
+     * Returns current list of exclusions.
+     */
+    public String[] getExcludes() {
+        return excludes;
+    }
+
+    @Override
+    protected BuildLogger makeLogger(PrintStream stream, int outputLevel) {
+        return new DefaultLogger(
+            errorPrintStream: stream,
+            outputPrintStream: stream,
+            // ignore passed in outputLevel for now, until we are filtering warning messages
+            messageOutputLevel: Project.MSG_ERR)
+    }
+
+    @Override
+    protected void runAnt(AntBuilder ant) {
+        ant.project.addTaskDefinition('thirdPartyAudit', de.thetaphi.forbiddenapis.ant.AntTask)
+
+        // we only want third party dependencies.
+        FileCollection jars = project.configurations.testCompile.fileCollection({ dependency ->
+            dependency.group.startsWith("org.elasticsearch") == false
+        })
+
+        // we don't want provided dependencies, which we have already scanned. e.g. don't
+        // scan ES core's dependencies for every single plugin
+        Configuration provided = project.configurations.findByName('provided')
+        if (provided != null) {
+            jars -= provided
+        }
+
+        // no dependencies matched, we are done
+        if (jars.isEmpty()) {
+            return;
+        }
+
+        // print which jars we are going to scan, always
+        // this is not the time to try to be succinct! Forbidden will print plenty on its own!
+        Set<String> names = new HashSet<>()
+        for (File jar : jars) {
+            names.add(jar.getName())
+        }
+        logger.error("[thirdPartyAudit] Scanning: " + names)
+
+        // warn that classes are missing
+        // TODO: move these to excludes list!
+        if (missingClasses) {
+            logger.warn("[thirdPartyAudit] WARNING: CLASSES ARE MISSING! Expect NoClassDefFoundError in bug reports from users!")
+        }
+
+        // TODO: forbidden-apis + zipfileset gives O(n^2) behavior unless we dump to a tmpdir first,
+        // and then remove our temp dir afterwards. don't complain: try it yourself.
+        // we don't use gradle temp dir handling, just google it, or try it yourself.
+
+        File tmpDir = new File(project.buildDir, 'tmp/thirdPartyAudit')
+
+        // clean up any previous mess (if we failed), then unzip everything to one directory
+        ant.delete(dir: tmpDir.getAbsolutePath())
+        tmpDir.mkdirs()
+        for (File jar : jars) {
+            ant.unzip(src: jar.getAbsolutePath(), dest: tmpDir.getAbsolutePath())
+        }
+
+        // convert exclusion class names to binary file names
+        String[] excludedFiles = new String[excludes.length];
+        for (int i = 0; i < excludes.length; i++) {
+            excludedFiles[i] = excludes[i].replace('.', '/') + ".class"
+            // check if the excluded file exists; if not, it is a sure sign things are outdated
+            if (! new File(tmpDir, excludedFiles[i]).exists()) {
+                throw new IllegalStateException("bogus thirdPartyAudit exclusion: '" + excludes[i] + "', not found in any dependency")
+            }
+        }
+
+        // jarHellReprise
+        checkSheistyClasses(tmpDir.toPath(), new HashSet<>(Arrays.asList(excludedFiles)));
+
+        ant.thirdPartyAudit(internalRuntimeForbidden: true,
+                            failOnUnsupportedJava: false,
+                            failOnMissingClasses: !missingClasses,
+                            classpath: project.configurations.testCompile.asPath) {
+            fileset(dir: tmpDir, excludes: excludedFiles.join(','))
+        }
+        // clean up our mess (if we succeed)
+        ant.delete(dir: tmpDir.getAbsolutePath())
+    }
+
+    /**
+     * check for sheisty classes: if they also exist in the extensions classloader, it's jar hell with the JDK!
+     */
+    private void checkSheistyClasses(Path root, Set<String> excluded) {
+        // system.parent = extensions loader.
+        // note: for jigsaw, this evilness will need modifications (e.g. use jrt filesystem!).
+        // but groovy/gradle needs to work at all first!
+        ClassLoader ext = ClassLoader.getSystemClassLoader().getParent()
+        assert ext != null
+
+        Set<String> sheistySet = new TreeSet<>();
+        Files.walkFileTree(root, new SimpleFileVisitor<Path>() {
+            @Override
+            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
+                String entry = root.relativize(file).toString()
+                if (entry.endsWith(".class")) {
+                    if (ext.getResource(entry) != null) {
+                        sheistySet.add(entry);
+                    }
+                }
+                return FileVisitResult.CONTINUE;
+            }
+        });
+
+        // check if we are ok
+        if (sheistySet.isEmpty()) {
+            return;
+        }
+
+        // leniency against exclusions list
+        sheistySet.removeAll(excluded);
+
+        if (sheistySet.isEmpty()) {
+            logger.warn("[thirdPartyAudit] WARNING: JAR HELL WITH JDK! Expect insanely hard-to-debug problems!")
+        } else {
+            throw new IllegalStateException("JAR HELL WITH JDK! " + sheistySet);
+        }
+    }
+}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index 8bc80da74b5..fa23299cee4 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -33,10 +33,10 @@ class ClusterConfiguration {
     int numNodes = 1

     @Input
-    int baseHttpPort = 9400
+    int httpPort = 0

     @Input
-    int baseTransportPort = 9500
+    int transportPort = 0

     @Input
     boolean daemonize = true
@@ -55,7 +55,7 @@ class ClusterConfiguration {
     @Input
     Closure waitCondition = { NodeInfo node, AntBuilder ant ->
         File tmpFile = new File(node.cwd, 'wait.success')
-        ant.get(src: "http://localhost:${node.httpPort()}",
+        ant.get(src: "http://${node.httpUri()}",
                 dest: tmpFile.toString(),
                 ignoreerrors: true, // do not fail on error, so logging buffers can be flushed by the wait task
                 retries: 10)
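
Switching the defaults from fixed base ports to `0` means each node now binds whatever ports are free, and the wait condition resolves the real address through `node.httpUri()` instead of computing it. This works because the node writes its bound addresses to a ports file at startup (see the NodeInfo change below), and the first line of that file is used verbatim as the `host:port` part of the uri. A hypothetical `logs/http.ports` written by a node might contain:

---------------------------------------------------------------------------
127.0.0.1:39242
[::1]:39242
---------------------------------------------------------------------------
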
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index e62175b743e..08976dbdb39 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -38,8 +38,10 @@ class ClusterFormationTasks {

     /**
      * Adds dependent tasks to the given task to start and stop a cluster with the given configuration.
+     *
+     * Returns an object that will resolve at execution time of the given task to a uri for the cluster.
      */
-    static void setup(Project project, Task task, ClusterConfiguration config) {
+    static Object setup(Project project, Task task, ClusterConfiguration config) {
         if (task.getEnabled() == false) {
             // no need to add cluster formation tasks if the task won't run!
             return
         }
@@ -55,6 +57,9 @@ class ClusterFormationTasks {

         Task wait = configureWaitTask("${task.name}#wait", project, nodes, startTasks)
         task.dependsOn(wait)
+
+        // delay the resolution of the uri by wrapping in a closure, so it is not resolved until the tests read it
+        return "${-> nodes[0].transportUri()}"
     }

     /** Adds a dependency on the given distribution */
@@ -200,17 +205,24 @@ class ClusterFormationTasks {

     /** Adds a task to write elasticsearch.yml for the given node configuration */
     static Task configureWriteConfigTask(String name, Project project, Task setup, NodeInfo node) {
         Map esConfig = [
-                'cluster.name'                    : node.clusterName,
-                'http.port'                       : node.httpPort(),
-                'transport.tcp.port'              : node.transportPort(),
-                'pidfile'                         : node.pidFile,
-                'discovery.zen.ping.unicast.hosts': (0.. logger.error("| ${line}") }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
index 337eea3de97..b369d35c03a 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy
@@ -43,6 +43,12 @@ class NodeInfo {
     /** the pid file the node will use */
     File pidFile

+    /** a file written by elasticsearch containing the ports of each bound address for http */
+    File httpPortsFile
+
+    /** a file written by elasticsearch containing the ports of each bound address for transport */
+    File transportPortsFile
+
     /** elasticsearch home dir */
     File homeDir

@@ -92,6 +98,10 @@ class NodeInfo {
         homeDir = homeDir(baseDir, config.distribution)
         confDir = confDir(baseDir, config.distribution)
         configFile = new File(confDir, 'elasticsearch.yml')
+        // even for rpm/deb, the logs are under home because we don't start with real services
+        File logsDir = new File(homeDir, 'logs')
+        httpPortsFile = new File(logsDir, 'http.ports')
+        transportPortsFile = new File(logsDir, 'transport.ports')
         cwd = new File(baseDir, "cwd")
         failedMarker = new File(cwd, 'run.failed')
         startLog = new File(cwd, 'run.log')
@@ -119,6 +129,7 @@ class NodeInfo {
             'JAVA_HOME' : project.javaHome,
             'ES_GC_OPTS': config.jvmArgs // we pass these with the undocumented gc opts so the argline can set gc, etc
         ]
+        args.add("-Des.tests.portsfile=true")
         args.addAll(config.systemProperties.collect { key, value -> "-D${key}=${value}" })
         for (Map.Entry property : System.properties.entrySet()) {
             if (property.getKey().startsWith('es.')) {
@@ -159,14 +170,14 @@ class NodeInfo {
         wrapperScript.setText("\"${esScript}\" ${argsPasser} > run.log 2>&1 ${exitMarker}", 'UTF-8')
     }

-    /** Returns the http port for this node */
-    int httpPort() {
-        return config.baseHttpPort + nodeNum
+    /** Returns an address and port suitable for a uri to connect to this node over http */
+    String httpUri() {
+        return httpPortsFile.readLines("UTF-8").get(0)
     }

-    /** Returns the transport port for this node */
-    int transportPort() {
-        return config.baseTransportPort + nodeNum
+    /** Returns an address and port suitable for a uri to connect to this node over transport protocol */
+    String transportUri() {
+        return transportPortsFile.readLines("UTF-8").get(0)
     }

     /** Returns the directory elasticsearch home is contained in for the given distribution */
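
The `"${-> nodes[0].transportUri()}"` value returned from `setup()` relies on Groovy's lazy GStrings: an embedded `->` closure is evaluated each time the string is rendered, not when it is created, so the ports file only has to exist by the time the test runner serializes its system properties. A minimal, self-contained sketch of the idiom (names and values invented):

---------------------------------------------------------------------------
def port = 0
// the closure is captured now, but only invoked when the GString is rendered
def uri = "localhost:${-> port}"
port = 39300   // e.g. set once the node has bound a socket
assert uri.toString() == 'localhost:39300'
---------------------------------------------------------------------------

This is also why `RandomizedTestingTask.systemProperty` now accepts an `Object` and defers `toString()` until the junit4 ant task is assembled.
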
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
index cd43cd2ca67..24bd57a3a59 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy
@@ -57,12 +57,12 @@ public class RestIntegTestTask extends RandomizedTestingTask {
         RestSpecHack.configureDependencies(project)
         project.afterEvaluate {
             dependsOn(RestSpecHack.configureTask(project, includePackaged))
-            systemProperty('tests.cluster', "localhost:${clusterConfig.baseTransportPort}")
         }
         // this must run after all projects have been configured, so we know any project
         // references can be accessed as a fully configured project
         project.gradle.projectsEvaluated {
-            ClusterFormationTasks.setup(project, this, clusterConfig)
+            Object clusterUri = ClusterFormationTasks.setup(project, this, clusterConfig)
+            systemProperty('tests.cluster', clusterUri)
         }
     }
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy
index 32469218a5b..842ef8c35cd 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RunTask.groovy
@@ -8,7 +8,7 @@ import org.gradle.util.ConfigureUtil

 public class RunTask extends DefaultTask {

-    ClusterConfiguration clusterConfig = new ClusterConfiguration(baseHttpPort: 9200, baseTransportPort: 9300, daemonize: false)
+    ClusterConfiguration clusterConfig = new ClusterConfiguration(httpPort: 9200, transportPort: 9300, daemonize: false)

     public RunTask() {
         description = "Runs elasticsearch with '${project.path}'"
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
index f317254cd45..3063853c871 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/StandaloneTestBasePlugin.groovy
@@ -42,7 +42,7 @@ public class StandaloneTestBasePlugin implements Plugin {

         // only setup tests to build
         project.sourceSets.create('test')
-        project.dependencies.add('testCompile', "org.elasticsearch:test-framework:${VersionProperties.elasticsearch}")
+        project.dependencies.add('testCompile', "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}")

         project.eclipse.classpath.sourceSets = [project.sourceSets.test]
         project.eclipse.classpath.plusConfigurations = [project.configurations.testRuntime]
diff --git a/buildSrc/src/main/resources/forbidden/all-signatures.txt b/buildSrc/src/main/resources/forbidden/all-signatures.txt
index 4bc24c9a2af..c1e65cbaf22 100644
--- a/buildSrc/src/main/resources/forbidden/all-signatures.txt
+++ b/buildSrc/src/main/resources/forbidden/all-signatures.txt
@@ -123,3 +123,6 @@ java.lang.System#getProperties() @ Use BootstrapInfo.getSystemProperties for a read-only view
 java.util.Collections#EMPTY_LIST
 java.util.Collections#EMPTY_MAP
 java.util.Collections#EMPTY_SET
+
+java.util.Collections#shuffle(java.util.List) @ Use java.util.Collections#shuffle(java.util.List, java.util.Random) with a reproducible source of randomness
+java.util.Random#<init>() @ Use org.elasticsearch.common.random.Randomness#create for reproducible sources of randomness
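
Both new signatures target reproducibility under the test seed: `#<init>()` is forbidden-apis syntax for a no-arg constructor, so a plain `new Random()` is rejected along with the self-seeding `Collections.shuffle(List)`, while seeded variants pass. A small sketch of what the check rejects and allows (seed value invented; `Randomness.create` is the alternative named in the signature message above):

---------------------------------------------------------------------------
long seed = 42L                               // in tests this comes from the randomized runner
List<Integer> ids = [1, 2, 3]
Random random = new Random()                  // forbidden: java.util.Random#<init>()
Collections.shuffle(ids)                      // forbidden: shuffles with its own internal Random
Collections.shuffle(ids, new Random(seed))    // allowed: reproducible from the seed
---------------------------------------------------------------------------
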
diff --git a/core/build.gradle b/core/build.gradle
index fd8a0c10f5a..61cdd12a194 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -82,7 +82,7 @@ dependencies {
   compile "net.java.dev.jna:jna:${versions.jna}", optional

   if (isEclipse == false || project.path == ":core-tests") {
-    testCompile("org.elasticsearch:test-framework:${version}") {
+    testCompile("org.elasticsearch.test:framework:${version}") {
       // tests use the locally compiled version of core
       exclude group: 'org.elasticsearch', module: 'elasticsearch'
     }
@@ -111,6 +111,14 @@ forbiddenPatterns {
   exclude '**/org/elasticsearch/cluster/routing/shard_routes.txt'
 }

+// classes are missing, e.g. org.jboss.marshalling.Marshaller
+thirdPartyAudit.missingClasses = true
+// uses internal sun ssl classes!
+thirdPartyAudit.excludes = [
+  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
+  'org.jboss.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
+]
+
 // dependency licenses are currently checked in distribution
 dependencyLicenses.enabled = false
diff --git a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
index fce58d2f88f..9f2b1b66221 100644
--- a/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
+++ b/core/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java
@@ -54,7 +54,6 @@ public class MapperQueryParser extends QueryParser {
     static {
         Map<String, FieldQueryExtension> fieldQueryExtensions = new HashMap<>();
         fieldQueryExtensions.put(ExistsFieldQueryExtension.NAME, new ExistsFieldQueryExtension());
-        fieldQueryExtensions.put(MissingFieldQueryExtension.NAME, new MissingFieldQueryExtension());
         FIELD_QUERY_EXTENSIONS = unmodifiableMap(fieldQueryExtensions);
     }
diff --git a/core/src/main/java/org/elasticsearch/Version.java b/core/src/main/java/org/elasticsearch/Version.java
index a5e2e38ca26..b8ba0a411a9 100644
--- a/core/src/main/java/org/elasticsearch/Version.java
+++ b/core/src/main/java/org/elasticsearch/Version.java
@@ -268,11 +268,15 @@ public class Version {
     public static final int V_2_0_1_ID = 2000199;
     public static final Version V_2_0_1 = new Version(V_2_0_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_0_2_ID = 2000299;
-    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final Version V_2_0_2 = new Version(V_2_0_2_ID, false, org.apache.lucene.util.Version.LUCENE_5_2_1);
+    public static final int V_2_0_3_ID = 2000399;
+    public static final Version V_2_0_3 = new Version(V_2_0_3_ID, true, org.apache.lucene.util.Version.LUCENE_5_2_1);
     public static final int V_2_1_0_ID = 2010099;
     public static final Version V_2_1_0 = new Version(V_2_1_0_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_1_1_ID = 2010199;
-    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final Version V_2_1_1 = new Version(V_2_1_1_ID, false, org.apache.lucene.util.Version.LUCENE_5_3_1);
+    public static final int V_2_1_2_ID = 2010299;
+    public static final Version V_2_1_2 = new Version(V_2_1_2_ID, true, org.apache.lucene.util.Version.LUCENE_5_3_1);
     public static final int V_2_2_0_ID = 2020099;
     public static final Version V_2_2_0 = new Version(V_2_2_0_ID, true, org.apache.lucene.util.Version.LUCENE_5_4_0);
     public static final int V_3_0_0_ID = 3000099;
@@ -293,10 +297,14 @@ public class Version {
             return V_3_0_0;
         case
V_2_2_0_ID: return V_2_2_0; + case V_2_1_2_ID: + return V_2_1_2; case V_2_1_1_ID: return V_2_1_1; case V_2_1_0_ID: return V_2_1_0; + case V_2_0_3_ID: + return V_2_0_3; case V_2_0_2_ID: return V_2_0_2; case V_2_0_1_ID: diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 88ccb809712..adcb873e838 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -107,6 +107,8 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction; +import org.elasticsearch.action.admin.indices.flush.TransportSyncedFlushAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.delete.TransportDeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; @@ -293,6 +295,7 @@ public class ActionModule extends AbstractModule { registerAction(ValidateQueryAction.INSTANCE, TransportValidateQueryAction.class); registerAction(RefreshAction.INSTANCE, TransportRefreshAction.class); registerAction(FlushAction.INSTANCE, TransportFlushAction.class); + registerAction(SyncedFlushAction.INSTANCE, TransportSyncedFlushAction.class); registerAction(ForceMergeAction.INSTANCE, TransportForceMergeAction.class); registerAction(UpgradeAction.INSTANCE, TransportUpgradeAction.class); registerAction(UpgradeStatusAction.INSTANCE, TransportUpgradeStatusAction.class); diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java new file mode 100644 index 00000000000..f5020a46b37 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdater.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.cluster.settings;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlocks;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import static org.elasticsearch.cluster.ClusterState.builder;
+
+/**
+ * Updates transient and persistent cluster state settings if there are any changes
+ * due to the update.
+ */
+final class SettingsUpdater {
+    final Settings.Builder transientUpdates = Settings.settingsBuilder();
+    final Settings.Builder persistentUpdates = Settings.settingsBuilder();
+    private final ClusterSettings clusterSettings;
+
+    SettingsUpdater(ClusterSettings clusterSettings) {
+        this.clusterSettings = clusterSettings;
+    }
+
+    synchronized Settings getTransientUpdates() {
+        return transientUpdates.build();
+    }
+
+    synchronized Settings getPersistentUpdate() {
+        return persistentUpdates.build();
+    }
+
+    synchronized ClusterState updateSettings(final ClusterState currentState, Settings transientToApply, Settings persistentToApply) {
+        boolean changed = false;
+        Settings.Builder transientSettings = Settings.settingsBuilder();
+        transientSettings.put(currentState.metaData().transientSettings());
+        changed |= apply(transientToApply, transientSettings, transientUpdates, "transient");
+
+        Settings.Builder persistentSettings = Settings.settingsBuilder();
+        persistentSettings.put(currentState.metaData().persistentSettings());
+        changed |= apply(persistentToApply, persistentSettings, persistentUpdates, "persistent");
+
+        if (!changed) {
+            return currentState;
+        }
+
+        MetaData.Builder metaData = MetaData.builder(currentState.metaData())
+                .persistentSettings(persistentSettings.build())
+                .transientSettings(transientSettings.build());
+
+        ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
+        boolean updatedReadOnly = MetaData.SETTING_READ_ONLY_SETTING.get(metaData.persistentSettings()) || MetaData.SETTING_READ_ONLY_SETTING.get(metaData.transientSettings());
+        if (updatedReadOnly) {
+            blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } else {
+            blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
+        }
+        ClusterState build = builder(currentState).metaData(metaData).blocks(blocks).build();
+        Settings settings = build.metaData().settings();
+        // now we try to apply things and if they are invalid we fail
+        // this dryRun will validate & parse settings but won't actually apply them.
+        clusterSettings.dryRun(settings);
+        return build;
+    }
+
+    private boolean apply(Settings toApply, Settings.Builder target, Settings.Builder updates, String type) {
+        boolean changed = false;
+        final Set<String> toRemove = new HashSet<>();
+        Settings.Builder settingsBuilder = Settings.settingsBuilder();
+        for (Map.Entry<String, String> entry : toApply.getAsMap().entrySet()) {
+            if (entry.getValue() == null) {
+                toRemove.add(entry.getKey());
+            } else if (clusterSettings.isLoggerSetting(entry.getKey()) || clusterSettings.hasDynamicSetting(entry.getKey())) {
+                settingsBuilder.put(entry.getKey(), entry.getValue());
+                updates.put(entry.getKey(), entry.getValue());
+                changed = true;
+            } else {
+                throw new IllegalArgumentException(type + " setting [" + entry.getKey() + "], not dynamically updateable");
+            }
+        }
+        changed |= applyDeletes(toRemove, target);
+        target.put(settingsBuilder.build());
+        return changed;
+    }
+
+    private final boolean applyDeletes(Set<String> deletes, Settings.Builder builder) {
+        boolean changed = false;
+        for (String entry : deletes) {
+            Set<String> keysToRemove = new HashSet<>();
+            Set<String> keySet = builder.internalMap().keySet();
+            for (String key : keySet) {
+                if (Regex.simpleMatch(entry, key)) {
+                    keysToRemove.add(key);
+                }
+            }
+            for (String key : keysToRemove) {
+                builder.remove(key);
+                changed = true;
+            }
+        }
+        return changed;
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
index 73d14a2bb11..99815b77ff7 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java
@@ -28,23 +28,19 @@ import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
-import org.elasticsearch.cluster.block.ClusterBlocks;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
-import org.elasticsearch.cluster.settings.ClusterDynamicSettings;
-import org.elasticsearch.cluster.settings.DynamicSettings;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

-import java.util.Map;
-
 import static org.elasticsearch.cluster.ClusterState.builder;

 /**
@@ -54,15 +50,14 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct

     private final AllocationService allocationService;

-    private final DynamicSettings dynamicSettings;
+    private final ClusterSettings clusterSettings;

     @Inject
     public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool,
-                                                AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings,
-                                                ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) { + AllocationService allocationService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterSettings clusterSettings) { super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, ClusterUpdateSettingsRequest::new); this.allocationService = allocationService; - this.dynamicSettings = dynamicSettings; + this.clusterSettings = clusterSettings; } @Override @@ -73,8 +68,8 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterBlockException checkBlock(ClusterUpdateSettingsRequest request, ClusterState state) { // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it - if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && request.persistentSettings().get(MetaData.SETTING_READ_ONLY) != null) || - request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) { + if ((request.transientSettings().getAsMap().isEmpty() && request.persistentSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.persistentSettings())) || + request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && MetaData.SETTING_READ_ONLY_SETTING.exists(request.transientSettings())) { return null; } return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); @@ -88,9 +83,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { - final Settings.Builder transientUpdates = Settings.settingsBuilder(); - final Settings.Builder persistentUpdates = Settings.settingsBuilder(); - + final SettingsUpdater updater = new SettingsUpdater(clusterSettings); clusterService.submitStateUpdateTask("cluster_update_settings", new AckedClusterStateUpdateTask(Priority.IMMEDIATE, request, listener) { @@ -98,7 +91,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct @Override protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) { - return new ClusterUpdateSettingsResponse(acknowledged, transientUpdates.build(), persistentUpdates.build()); + return new ClusterUpdateSettingsResponse(acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate()); } @Override @@ -125,7 +118,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAct // so we should *not* execute the reroute. 
                    if (!clusterService.state().nodes().localNodeMaster()) {
                        logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
-                       listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+                       listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                        return;
                    }
@@ -145,13 +138,13 @@
                    @Override
                    // we return when the cluster reroute is acked or it times out but the acknowledged flag depends on whether the update settings was acknowledged
                    protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
-                       return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, transientUpdates.build(), persistentUpdates.build());
+                       return new ClusterUpdateSettingsResponse(updateSettingsAcked && acknowledged, updater.getTransientUpdates(), updater.getPersistentUpdate());
                    }

                    @Override
                    public void onNoLongerMaster(String source) {
                        logger.debug("failed to perform reroute after cluster settings were updated - current node is no longer a master");
-                       listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, transientUpdates.build(), persistentUpdates.build()));
+                       listener.onResponse(new ClusterUpdateSettingsResponse(updateSettingsAcked, updater.getTransientUpdates(), updater.getPersistentUpdate()));
                    }

                    @Override
@@ -181,58 +174,11 @@
                @Override
                public ClusterState execute(final ClusterState currentState) {
-                   Settings.Builder transientSettings = Settings.settingsBuilder();
-                   transientSettings.put(currentState.metaData().transientSettings());
-                   for (Map.Entry<String, String> entry : request.transientSettings().getAsMap().entrySet()) {
-                       if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
-                           String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
-                           if (error == null) {
-                               transientSettings.put(entry.getKey(), entry.getValue());
-                               transientUpdates.put(entry.getKey(), entry.getValue());
-                               changed = true;
-                           } else {
-                               logger.warn("ignoring transient setting [{}], [{}]", entry.getKey(), error);
-                           }
-                       } else {
-                           logger.warn("ignoring transient setting [{}], not dynamically updateable", entry.getKey());
-                       }
-                   }
-
-                   Settings.Builder persistentSettings = Settings.settingsBuilder();
-                   persistentSettings.put(currentState.metaData().persistentSettings());
-                   for (Map.Entry<String, String> entry : request.persistentSettings().getAsMap().entrySet()) {
-                       if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) {
-                           String error = dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state());
-                           if (error == null) {
-                               persistentSettings.put(entry.getKey(), entry.getValue());
-                               persistentUpdates.put(entry.getKey(), entry.getValue());
-                               changed = true;
-                           } else {
-                               logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error);
-                           }
-                       } else {
-                           logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey());
-                       }
-                   }
-
-                   if (!changed) {
-                       return currentState;
-                   }
-
-                   MetaData.Builder metaData = MetaData.builder(currentState.metaData())
-                           .persistentSettings(persistentSettings.build())
-                           .transientSettings(transientSettings.build());
-
-                   ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
-                   boolean updatedReadOnly = metaData.persistentSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || metaData.transientSettings().getAsBoolean(MetaData.SETTING_READ_ONLY, false);
-                   if (updatedReadOnly) {
-                       blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
-                   } else {
-                       blocks.removeGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
-                   }
-
-                   return builder(currentState).metaData(metaData).blocks(blocks).build();
+                   ClusterState clusterState = updater.updateSettings(currentState, request.transientSettings(), request.persistentSettings());
+                   changed = clusterState != currentState;
+                   return clusterState;
                }
            });
    }
+
 }
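
The refactor concentrates validation in `SettingsUpdater`, which the action now drives through the three lines above; the interaction boils down to the following sketch (the setting key is an arbitrary example):

---------------------------------------------------------------------------
SettingsUpdater updater = new SettingsUpdater(clusterSettings)
ClusterState newState = updater.updateSettings(
        currentState,
        Settings.settingsBuilder().put('cluster.routing.allocation.enable', 'none').build(), // transient
        Settings.settingsBuilder().build())                                                  // persistent
// an unchanged reference means nothing was applied
boolean changed = !newState.is(currentState)
---------------------------------------------------------------------------

Note the behavioral shift buried in `apply`: settings that are not dynamically updateable used to be logged and ignored, and now fail the request with an `IllegalArgumentException`.
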
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
index 5fe8297a6ba..f2bfb18c43f 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/validate/template/TransportRenderSearchTemplateAction.java
@@ -33,6 +33,8 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

+import java.util.Collections;
+
 public class TransportRenderSearchTemplateAction extends HandledTransportAction<RenderSearchTemplateRequest, RenderSearchTemplateResponse> {

     private final ScriptService scriptService;
@@ -55,7 +57,7 @@
             @Override
             protected void doRun() throws Exception {
-                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request);
+                ExecutableScript executable = scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, request, Collections.emptyMap());
                 BytesReference processedTemplate = (BytesReference) executable.run();
                 RenderSearchTemplateResponse response = new RenderSearchTemplateResponse();
                 response.source(processedTemplate);
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
index 2c25ee34f18..e454fcabc7a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java
@@ -31,31 +31,36 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

 /**
  * Close index action
  */
-public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> implements NodeSettingsService.Listener {
+public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> {

     private final MetaDataIndexStateService indexStateService;
     private final DestructiveOperations destructiveOperations;
     private volatile boolean closeIndexEnabled;
-    public static final String SETTING_CLUSTER_INDICES_CLOSE_ENABLE = "cluster.indices.close.enable";
+    public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);

     @Inject
     public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
                                      ThreadPool threadPool, MetaDataIndexStateService indexStateService,
-                                     NodeSettingsService nodeSettingsService, ActionFilters actionFilters,
+                                     ClusterSettings clusterSettings, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) {
         super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, CloseIndexRequest::new);
         this.indexStateService = indexStateService;
         this.destructiveOperations = destructiveOperations;
-        this.closeIndexEnabled = settings.getAsBoolean(SETTING_CLUSTER_INDICES_CLOSE_ENABLE, true);
-        nodeSettingsService.addListener(this);
+        this.closeIndexEnabled = CLUSTER_INDICES_CLOSE_ENABLE_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_INDICES_CLOSE_ENABLE_SETTING, this::setCloseIndexEnabled);
+    }
+
+    private void setCloseIndexEnabled(boolean closeIndexEnabled) {
+        this.closeIndexEnabled = closeIndexEnabled;
     }

     @Override
@@ -73,7 +78,7 @@
     protected void doExecute(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
         destructiveOperations.failDestructive(request.indices());
         if (closeIndexEnabled == false) {
-            throw new IllegalStateException("closing indices is disabled - set [" + SETTING_CLUSTER_INDICES_CLOSE_ENABLE + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
+            throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() + ": true] to enable it. NOTE: closed indices still consume a significant amount of diskspace");
         }
         super.doExecute(request, listener);
     }
@@ -104,13 +109,4 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushAction.java
new file mode 100644
+public class SyncedFlushAction extends Action<SyncedFlushRequest, SyncedFlushResponse, SyncedFlushRequestBuilder> {
+
+    public static final SyncedFlushAction INSTANCE = new SyncedFlushAction();
+    public static final String NAME = "indices:admin/synced_flush";
+
+    private SyncedFlushAction() {
+        super(NAME);
+    }
+
+    @Override
+    public SyncedFlushResponse newResponse() {
+        return new SyncedFlushResponse();
+    }
+
+    @Override
+    public SyncedFlushRequestBuilder newRequestBuilder(ElasticsearchClient client) {
+        return new SyncedFlushRequestBuilder(client, this);
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingFieldQueryExtension.java b/core/src/main/java/org/elasticsearch/index/query/MissingFieldQueryExtension.java
deleted file mode 100644
-    @Override
-    public Query query(QueryShardContext context, String queryText) {
-        Query query = MissingQueryBuilder.newFilter(context, queryText, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE, MissingQueryBuilder.DEFAULT_NULL_VALUE);
-        if (query != null) {
-            return new ConstantScoreQuery(query);
-        }
-        return null;
-    }
diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java
new file mode 100644
index 00000000000..59719fe8877
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; + +import java.util.Arrays; + +/** + * A synced flush request to sync flush one or more indices. The synced flush process of an index performs a flush + * and writes the same sync id to primary and all copies. + * + *
Best created with {@link org.elasticsearch.client.Requests#syncedFlushRequest(String...)}.
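+ * <p>A minimal usage sketch (index name is illustrative), going through the {@code Requests} helper referenced above: {@code client.admin().indices().syncedFlush(Requests.syncedFlushRequest("index1")).actionGet()}</p>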
+ * + * @see org.elasticsearch.client.Requests#flushRequest(String...) + * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest) + * @see SyncedFlushResponse + */ +public class SyncedFlushRequest extends BroadcastRequest { + + public SyncedFlushRequest() { + } + + /** + * Copy constructor that creates a new synced flush request that is a copy of the one provided as an argument. + * The new request will inherit though headers and context from the original request that caused it. + */ + public SyncedFlushRequest(ActionRequest originalRequest) { + super(originalRequest); + } + + /** + * Constructs a new synced flush request against one or more indices. If nothing is provided, all indices will + * be sync flushed. + */ + public SyncedFlushRequest(String... indices) { + super(indices); + } + + + @Override + public String toString() { + return "SyncedFlushRequest{" + + "indices=" + Arrays.toString(indices) + "}"; + } +} diff --git a/core/src/main/java/org/elasticsearch/rest/RestModule.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java similarity index 50% rename from core/src/main/java/org/elasticsearch/rest/RestModule.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java index e7949172d0a..9e407260811 100644 --- a/core/src/main/java/org/elasticsearch/rest/RestModule.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushRequestBuilder.java @@ -17,35 +17,25 @@ * under the License. */ -package org.elasticsearch.rest; +package org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.rest.action.RestActionModule; +import org.elasticsearch.action.ActionRequestBuilder; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.ElasticsearchClient; -import java.util.ArrayList; -import java.util.List; +public class SyncedFlushRequestBuilder extends ActionRequestBuilder { -/** - * - */ -public class RestModule extends AbstractModule { - - private final Settings settings; - private List> restPluginsActions = new ArrayList<>(); - - public void addRestAction(Class restAction) { - restPluginsActions.add(restAction); + public SyncedFlushRequestBuilder(ElasticsearchClient client, SyncedFlushAction action) { + super(client, action, new SyncedFlushRequest()); } - public RestModule(Settings settings) { - this.settings = settings; + public SyncedFlushRequestBuilder setIndices(String[] indices) { + super.request().indices(indices); + return this; } - - @Override - protected void configure() { - bind(RestController.class).asEagerSingleton(); - new RestActionModule(restPluginsActions).configure(binder()); + public SyncedFlushRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { + super.request().indicesOptions(indicesOptions); + return this; } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java similarity index 64% rename from core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java index 435c0d138cd..5925370e5f7 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/IndicesSyncedFlushResult.java +++ 
b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushResponse.java @@ -16,16 +16,25 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; +import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -34,13 +43,16 @@ import static java.util.Collections.unmodifiableMap; /** * The result of performing a sync flush operation on all shards of multiple indices */ -public class IndicesSyncedFlushResult implements ToXContent { +public class SyncedFlushResponse extends ActionResponse implements ToXContent { - final Map> shardsResultPerIndex; - final ShardCounts shardCounts; + Map> shardsResultPerIndex; + ShardCounts shardCounts; + SyncedFlushResponse() { - public IndicesSyncedFlushResult(Map> shardsResultPerIndex) { + } + + public SyncedFlushResponse(Map> shardsResultPerIndex) { // shardsResultPerIndex is never modified after it is passed to this // constructor so this is safe even though shardsResultPerIndex is a // ConcurrentHashMap @@ -48,17 +60,23 @@ public class IndicesSyncedFlushResult implements ToXContent { this.shardCounts = calculateShardCounts(Iterables.flatten(shardsResultPerIndex.values())); } - /** total number shards, including replicas, both assigned and unassigned */ + /** + * total number shards, including replicas, both assigned and unassigned + */ public int totalShards() { return shardCounts.total; } - /** total number of shards for which the operation failed */ + /** + * total number of shards for which the operation failed + */ public int failedShards() { return shardCounts.failed; } - /** total number of shards which were successfully sync-flushed */ + /** + * total number of shards which were successfully sync-flushed + */ public int successfulShards() { return shardCounts.successful; } @@ -91,8 +109,8 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.endObject(); continue; } - Map failedShards = shardResults.failedShards(); - for (Map.Entry shardEntry : failedShards.entrySet()) { + Map failedShards = shardResults.failedShards(); + for (Map.Entry shardEntry : failedShards.entrySet()) { builder.startObject(); builder.field(Fields.SHARD, shardResults.shardId().id()); builder.field(Fields.REASON, shardEntry.getValue().failureReason()); @@ -123,11 +141,11 @@ public class IndicesSyncedFlushResult implements ToXContent { return new ShardCounts(total, successful, failed); } - static final class ShardCounts implements ToXContent { + static final class ShardCounts implements ToXContent, Streamable { - public final int total; - public final int successful; - public final int failed; + public int total; + public int successful; + public int failed; 
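+ // note: fields are intentionally mutable (no longer final) so that readFrom(StreamInput) below can fill in an instance created through the new no-arg constructor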
ShardCounts(int total, int successful, int failed) { this.total = total; @@ -135,6 +153,10 @@ public class IndicesSyncedFlushResult implements ToXContent { this.failed = failed; } + ShardCounts() { + + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.field(Fields.TOTAL, total); @@ -142,6 +164,20 @@ public class IndicesSyncedFlushResult implements ToXContent { builder.field(Fields.FAILED, failed); return builder; } + + @Override + public void readFrom(StreamInput in) throws IOException { + total = in.readInt(); + successful = in.readInt(); + failed = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(total); + out.writeInt(successful); + out.writeInt(failed); + } } static final class Fields { @@ -154,4 +190,37 @@ public class IndicesSyncedFlushResult implements ToXContent { static final XContentBuilderString ROUTING = new XContentBuilderString("routing"); static final XContentBuilderString REASON = new XContentBuilderString("reason"); } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + shardCounts = new ShardCounts(); + shardCounts.readFrom(in); + Map> tmpShardsResultPerIndex = new HashMap<>(); + int numShardsResults = in.readInt(); + for (int i =0 ; i< numShardsResults; i++) { + String index = in.readString(); + List shardsSyncedFlushResults = new ArrayList<>(); + int numShards = in.readInt(); + for (int j =0; j< numShards; j++) { + shardsSyncedFlushResults.add(ShardsSyncedFlushResult.readShardsSyncedFlushResult(in)); + } + tmpShardsResultPerIndex.put(index, shardsSyncedFlushResults); + } + shardsResultPerIndex = Collections.unmodifiableMap(tmpShardsResultPerIndex); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + shardCounts.writeTo(out); + out.writeInt(shardsResultPerIndex.size()); + for (Map.Entry> entry : shardsResultPerIndex.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + for (ShardsSyncedFlushResult shardsSyncedFlushResult : entry.getValue()) { + shardsSyncedFlushResult.writeTo(out); + } + } + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java new file mode 100644 index 00000000000..3ba354f4629 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportSyncedFlushAction.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.indices.flush.SyncedFlushService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +/** + * Synced flush Action. + */ +public class TransportSyncedFlushAction extends HandledTransportAction { + + SyncedFlushService syncedFlushService; + + @Inject + public TransportSyncedFlushAction(Settings settings, ThreadPool threadPool, + TransportService transportService, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + SyncedFlushService syncedFlushService) { + super(settings, SyncedFlushAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SyncedFlushRequest::new); + this.syncedFlushService = syncedFlushService; + } + + @Override + protected void doExecute(SyncedFlushRequest request, ActionListener listener) { + syncedFlushService.attemptSyncedFlush(request.indices(), request.indicesOptions(), listener); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index cab1047cac4..2717a2320ef 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -32,12 +32,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Arrays; - /** * Open index action */ @@ -49,7 +46,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction { private DiscoveryNode node; private long version; + private String allocationId; private Throwable storeException; - private Allocation allocation; + private AllocationStatus allocationStatus; /** * The status of the shard store with respect to the cluster */ - public enum Allocation { + public enum AllocationStatus { /** * Allocated as primary @@ -81,16 +82,16 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private final byte id; - Allocation(byte id) { + AllocationStatus(byte id) { this.id = id; } - private static Allocation fromId(byte id) { + private static AllocationStatus fromId(byte id) { switch (id) { case 0: return PRIMARY; case 1: return REPLICA; case 2: return UNUSED; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + "]"); + default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } @@ -99,11 +100,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon case 0: return "primary"; case 1: return "replica"; case 2: return "unused"; - default: throw new IllegalArgumentException("unknown id for allocation [" + id + 
"]"); + default: throw new IllegalArgumentException("unknown id for allocation status [" + id + "]"); } } - private static Allocation readFrom(StreamInput in) throws IOException { + private static AllocationStatus readFrom(StreamInput in) throws IOException { return fromId(in.readByte()); } @@ -115,10 +116,11 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon private StoreStatus() { } - public StoreStatus(DiscoveryNode node, long version, Allocation allocation, Throwable storeException) { + public StoreStatus(DiscoveryNode node, long version, String allocationId, AllocationStatus allocationStatus, Throwable storeException) { this.node = node; this.version = version; - this.allocation = allocation; + this.allocationId = allocationId; + this.allocationStatus = allocationStatus; this.storeException = storeException; } @@ -130,13 +132,20 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * Version of the store, used to select the store that will be - * used as a primary. + * Version of the store */ public long getVersion() { return version; } + /** + * AllocationStatus id of the store, used to select the store that will be + * used as a primary. + */ + public String getAllocationId() { + return allocationId; + } + /** * Exception while trying to open the * shard index or from when the shard failed @@ -146,13 +155,13 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } /** - * The allocation status of the store. - * {@link Allocation#PRIMARY} indicates a primary shard copy - * {@link Allocation#REPLICA} indicates a replica shard copy - * {@link Allocation#UNUSED} indicates an unused shard copy + * The allocationStatus status of the store. + * {@link AllocationStatus#PRIMARY} indicates a primary shard copy + * {@link AllocationStatus#REPLICA} indicates a replica shard copy + * {@link AllocationStatus#UNUSED} indicates an unused shard copy */ - public Allocation getAllocation() { - return allocation; + public AllocationStatus getAllocationStatus() { + return allocationStatus; } static StoreStatus readStoreStatus(StreamInput in) throws IOException { @@ -165,7 +174,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void readFrom(StreamInput in) throws IOException { node = DiscoveryNode.readNode(in); version = in.readLong(); - allocation = Allocation.readFrom(in); + allocationId = in.readOptionalString(); + allocationStatus = AllocationStatus.readFrom(in); if (in.readBoolean()) { storeException = in.readThrowable(); } @@ -175,7 +185,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public void writeTo(StreamOutput out) throws IOException { node.writeTo(out); out.writeLong(version); - allocation.writeTo(out); + out.writeOptionalString(allocationId); + allocationStatus.writeTo(out); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); @@ -188,7 +199,8 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { node.toXContent(builder, params); builder.field(Fields.VERSION, version); - builder.field(Fields.ALLOCATED, allocation.value()); + builder.field(Fields.ALLOCATION_ID, allocationId); + builder.field(Fields.ALLOCATED, allocationStatus.value()); if (storeException != null) { builder.startObject(Fields.STORE_EXCEPTION); ElasticsearchException.toXContent(builder, 
params, storeException); @@ -206,7 +218,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon } else { int compare = Long.compare(other.version, version); if (compare == 0) { - return Integer.compare(allocation.id, other.allocation.id); + return Integer.compare(allocationStatus.id, other.allocationStatus.id); } return compare; } @@ -379,6 +391,7 @@ public class IndicesShardStoresResponse extends ActionResponse implements ToXCon static final XContentBuilderString STORES = new XContentBuilderString("stores"); // StoreStatus fields static final XContentBuilderString VERSION = new XContentBuilderString("version"); + static final XContentBuilderString ALLOCATION_ID = new XContentBuilderString("allocation_id"); static final XContentBuilderString STORE_EXCEPTION = new XContentBuilderString("store_exception"); static final XContentBuilderString ALLOCATED = new XContentBuilderString("allocation"); } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 336ebc254b4..d345c0e7d45 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -179,8 +179,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc } for (NodeGatewayStartedShards response : fetchResponse.responses) { if (shardExistsInNode(response)) { - IndicesShardStoresResponse.StoreStatus.Allocation allocation = getAllocation(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); - storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), allocation, response.storeException())); + IndicesShardStoresResponse.StoreStatus.AllocationStatus allocationStatus = getAllocationStatus(fetchResponse.shardId.getIndex(), fetchResponse.shardId.id(), response.getNode()); + storeStatuses.add(new IndicesShardStoresResponse.StoreStatus(response.getNode(), response.version(), response.allocationId(), allocationStatus, response.storeException())); } } CollectionUtil.timSort(storeStatuses); @@ -193,27 +193,27 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc listener.onResponse(new IndicesShardStoresResponse(indicesStoreStatusesBuilder.build(), Collections.unmodifiableList(failureBuilder))); } - private IndicesShardStoresResponse.StoreStatus.Allocation getAllocation(String index, int shardID, DiscoveryNode node) { + private IndicesShardStoresResponse.StoreStatus.AllocationStatus getAllocationStatus(String index, int shardID, DiscoveryNode node) { for (ShardRouting shardRouting : routingNodes.node(node.id())) { ShardId shardId = shardRouting.shardId(); if (shardId.id() == shardID && shardId.getIndex().equals(index)) { if (shardRouting.primary()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY; } else if (shardRouting.assignedToNode()) { - return IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA; } else { - return IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } } } - return 
IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED; + return IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED; } /** * A shard exists/existed in a node only if shard state file exists in the node */ private boolean shardExistsInNode(final NodeGatewayStartedShards response) { - return response.storeException() != null || response.version() != -1; + return response.storeException() != null || response.version() != -1 || response.allocationId() != null; } @Override diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java new file mode 100644 index 00000000000..a0ccca0fb5c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Provides a backoff policy for bulk requests. Whenever a bulk request is rejected due to resource constraints (i.e. the client's internal + * thread pool is full), the backoff policy decides how long the bulk processor will wait before the operation is retried internally. + * + * Notes for implementing custom subclasses: + * + * The underlying mathematical principle of BackoffPolicy are progressions which can be either finite or infinite although + * the latter should not be used for retrying. A progression can be mapped to a java.util.Iterator with the following + * semantics: + * + *
+ * <ul> + * <li>#hasNext() determines whether the progression has more elements. Return true for infinite progressions</li> + * <li>#next() determines the next element in the progression, i.e. the next wait time period</li> + * </ul> + *
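+ * <p>For example, the default {@code exponentialBackoff()} policy below produces waits of roughly 50, 60, 80, 150, 280, 580, 1250 and 2740 milliseconds across its eight retries, about 5.1 seconds in total.</p>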
+ * + * Note that backoff policies are exposed as Iterables in order to be consumed multiple times. + */ +public abstract class BackoffPolicy implements Iterable<TimeValue> { + private static final BackoffPolicy NO_BACKOFF = new NoBackoff(); + + /** + * Creates a backoff policy that will not allow any backoff, i.e. an operation will fail after the first attempt. + * + * @return A backoff policy without any backoff period. The returned instance is thread safe. + */ + public static BackoffPolicy noBackoff() { + return NO_BACKOFF; + } + + /** + * Creates a new constant backoff policy with the provided configuration. + * + * @param delay The delay defines how long to wait between retry attempts. Must not be null. + * Must be <= Integer.MAX_VALUE ms. + * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number. + * @return A backoff policy with a constant wait time between retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy constantBackoff(TimeValue delay, int maxNumberOfRetries) { + return new ConstantBackoff(checkDelay(delay), maxNumberOfRetries); + } + + /** + * Creates a new exponential backoff policy with a default configuration of 50 ms initial wait period and 8 retries taking + * roughly 5.1 seconds in total. + * + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread. + */ + public static BackoffPolicy exponentialBackoff() { + return exponentialBackoff(TimeValue.timeValueMillis(50), 8); + } + + /** + * Creates a new exponential backoff policy with the provided configuration. + * + * @param initialDelay The initial delay defines how long to wait for the first retry attempt. Must not be null. + * Must be <= Integer.MAX_VALUE ms. + * @param maxNumberOfRetries The maximum number of retries. Must be a non-negative number. + * @return A backoff policy with an exponential increase in wait time for retries. The returned instance is thread safe but each + * iterator created from it should only be used by a single thread.
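+ * <p>Usage sketch, e.g. for {@code BulkProcessor.Builder#setBackoffPolicy}: {@code BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3)} waits 100, 110 and 130 ms before giving up.</p>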
+ */ + public static BackoffPolicy exponentialBackoff(TimeValue initialDelay, int maxNumberOfRetries) { + return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries); + } + + private static TimeValue checkDelay(TimeValue delay) { + if (delay.millis() > Integer.MAX_VALUE) { + throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms"); + } + return delay; + } + + private static class NoBackoff extends BackoffPolicy { + @Override + public Iterator iterator() { + return new Iterator() { + @Override + public boolean hasNext() { + return false; + } + + @Override + public TimeValue next() { + throw new NoSuchElementException("No backoff"); + } + }; + } + } + + private static class ExponentialBackoff extends BackoffPolicy { + private final int start; + + private final int numberOfElements; + + private ExponentialBackoff(int start, int numberOfElements) { + assert start >= 0; + assert numberOfElements >= 0; + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator iterator() { + return new ExponentialBackoffIterator(start, numberOfElements); + } + } + + private static class ExponentialBackoffIterator implements Iterator { + private final int numberOfElements; + + private final int start; + + private int currentlyConsumed; + + private ExponentialBackoffIterator(int start, int numberOfElements) { + this.start = start; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return currentlyConsumed < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException("Only up to " + numberOfElements + " elements"); + } + int result = start + 10 * ((int) Math.exp(0.8d * (currentlyConsumed)) - 1); + currentlyConsumed++; + return TimeValue.timeValueMillis(result); + } + } + + private static final class ConstantBackoff extends BackoffPolicy { + private final TimeValue delay; + + private final int numberOfElements; + + public ConstantBackoff(TimeValue delay, int numberOfElements) { + assert numberOfElements >= 0; + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public Iterator iterator() { + return new ConstantBackoffIterator(delay, numberOfElements); + } + } + + private static final class ConstantBackoffIterator implements Iterator { + private final TimeValue delay; + private final int numberOfElements; + private int curr; + + public ConstantBackoffIterator(TimeValue delay, int numberOfElements) { + this.delay = delay; + this.numberOfElements = numberOfElements; + } + + @Override + public boolean hasNext() { + return curr < numberOfElements; + } + + @Override + public TimeValue next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + curr++; + return delay; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 2a7c185ad8a..43014cfb759 100644 --- a/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -48,7 +47,7 @@ public class BulkProcessor implements Closeable { /** * A listener for the execution. 
*/ - public static interface Listener { + public interface Listener { /** * Callback before the bulk is executed. @@ -62,6 +61,9 @@ public class BulkProcessor implements Closeable { /** * Callback after a failed execution of bulk request. + * + * Note that in case an instance of InterruptedException is passed, which means that request processing has been + * cancelled externally, the thread's interruption status has been restored prior to calling this method. */ void afterBulk(long executionId, BulkRequest request, Throwable failure); } @@ -79,6 +81,7 @@ public class BulkProcessor implements Closeable { private int bulkActions = 1000; private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; + private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); /** * Creates a builder of bulk processor with the client to use and the listener that will be used @@ -136,11 +139,27 @@ public class BulkProcessor implements Closeable { return this; } + /** + * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally + * in case they have failed due to resource constraints (i.e. a thread pool was full). + * + * The default is to back off exponentially. + * + * @see org.elasticsearch.action.bulk.BackoffPolicy#exponentialBackoff() + */ + public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { + if (backoffPolicy == null) { + throw new NullPointerException("'backoffPolicy' must not be null. To disable backoff, pass BackoffPolicy.noBackoff()"); + } + this.backoffPolicy = backoffPolicy; + return this; + } + /** * Builds a new bulk processor. */ public BulkProcessor build() { - return new BulkProcessor(client, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); + return new BulkProcessor(client, backoffPolicy, listener, name, concurrentRequests, bulkActions, bulkSize, flushInterval); } } @@ -148,42 +167,31 @@ public class BulkProcessor implements Closeable { if (client == null) { throw new NullPointerException("The client you specified while building a BulkProcessor is null"); } - + return new Builder(client, listener); } - private final Client client; - private final Listener listener; - - private final String name; - - private final int concurrentRequests; private final int bulkActions; private final long bulkSize; - private final TimeValue flushInterval; - private final Semaphore semaphore; + private final ScheduledThreadPoolExecutor scheduler; private final ScheduledFuture scheduledFuture; private final AtomicLong executionIdGen = new AtomicLong(); private BulkRequest bulkRequest; + private final BulkRequestHandler bulkRequestHandler; private volatile boolean closed = false; - BulkProcessor(Client client, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { - this.client = client; - this.listener = listener; - this.name = name; - this.concurrentRequests = concurrentRequests; + BulkProcessor(Client client, BackoffPolicy backoffPolicy, Listener listener, @Nullable String name, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval) { this.bulkActions = bulkActions; this.bulkSize = bulkSize.bytes(); - this.semaphore = new Semaphore(concurrentRequests); this.bulkRequest = new BulkRequest(); + this.bulkRequestHandler = (concurrentRequests == 0) ? 
BulkRequestHandler.syncHandler(client, backoffPolicy, listener) : BulkRequestHandler.asyncHandler(client, backoffPolicy, listener, concurrentRequests); - this.flushInterval = flushInterval; if (flushInterval != null) { this.scheduler = (ScheduledThreadPoolExecutor) Executors.newScheduledThreadPool(1, EsExecutors.daemonThreadFactory(client.settings(), (name != null ? "[" + name + "]" : "") + "bulk_processor")); this.scheduler.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); @@ -231,14 +239,7 @@ public class BulkProcessor implements Closeable { if (bulkRequest.numberOfActions() > 0) { execute(); } - if (this.concurrentRequests < 1) { - return true; - } - if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { - semaphore.release(this.concurrentRequests); - return true; - } - return false; + return this.bulkRequestHandler.awaitClose(timeout, unit); } /** @@ -308,58 +309,7 @@ public class BulkProcessor implements Closeable { final long executionId = executionIdGen.incrementAndGet(); this.bulkRequest = new BulkRequest(); - - if (concurrentRequests == 0) { - // execute in a blocking fashion... - boolean afterCalled = false; - try { - listener.beforeBulk(executionId, bulkRequest); - BulkResponse bulkItemResponses = client.bulk(bulkRequest).actionGet(); - afterCalled = true; - listener.afterBulk(executionId, bulkRequest, bulkItemResponses); - } catch (Exception e) { - if (!afterCalled) { - listener.afterBulk(executionId, bulkRequest, e); - } - } - } else { - boolean success = false; - boolean acquired = false; - try { - listener.beforeBulk(executionId, bulkRequest); - semaphore.acquire(); - acquired = true; - client.bulk(bulkRequest, new ActionListener() { - @Override - public void onResponse(BulkResponse response) { - try { - listener.afterBulk(executionId, bulkRequest, response); - } finally { - semaphore.release(); - } - } - - @Override - public void onFailure(Throwable e) { - try { - listener.afterBulk(executionId, bulkRequest, e); - } finally { - semaphore.release(); - } - } - }); - success = true; - } catch (InterruptedException e) { - Thread.interrupted(); - listener.afterBulk(executionId, bulkRequest, e); - } catch (Throwable t) { - listener.afterBulk(executionId, bulkRequest, t); - } finally { - if (!success && acquired) { // if we fail on client.bulk() release the semaphore - semaphore.release(); - } - } - } + this.bulkRequestHandler.execute(bulkRequest, executionId); } private boolean isOverTheLimit() { diff --git a/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java new file mode 100644 index 00000000000..dc98a16c578 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/BulkRequestHandler.java @@ -0,0 +1,166 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; + +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; + +/** + * Abstracts the low-level details of bulk request handling + */ +abstract class BulkRequestHandler { + protected final ESLogger logger; + protected final Client client; + + protected BulkRequestHandler(Client client) { + this.client = client; + this.logger = Loggers.getLogger(getClass(), client.settings()); + } + + + public abstract void execute(BulkRequest bulkRequest, long executionId); + + public abstract boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException; + + + public static BulkRequestHandler syncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + return new SyncBulkRequestHandler(client, backoffPolicy, listener); + } + + public static BulkRequestHandler asyncHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + return new AsyncBulkRequestHandler(client, backoffPolicy, listener, concurrentRequests); + } + + private static class SyncBulkRequestHandler extends BulkRequestHandler { + private final BulkProcessor.Listener listener; + private final BackoffPolicy backoffPolicy; + + public SyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener) { + super(client); + this.backoffPolicy = backoffPolicy; + this.listener = listener; + } + + @Override + public void execute(BulkRequest bulkRequest, long executionId) { + boolean afterCalled = false; + try { + listener.beforeBulk(executionId, bulkRequest); + BulkResponse bulkResponse = Retry + .on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withSyncBackoff(client, bulkRequest); + afterCalled = true; + listener.afterBulk(executionId, bulkRequest, bulkResponse); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, e); + } + } catch (Throwable t) { + logger.warn("Failed to execute bulk request {}.", t, executionId); + if (!afterCalled) { + listener.afterBulk(executionId, bulkRequest, t); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + // we are "closed" immediately as there is no request in flight + return true; + } + } + + private static class AsyncBulkRequestHandler extends BulkRequestHandler { + private final BackoffPolicy backoffPolicy; + private final BulkProcessor.Listener listener; + private final Semaphore semaphore; + private final int concurrentRequests; + + private AsyncBulkRequestHandler(Client client, BackoffPolicy backoffPolicy, BulkProcessor.Listener listener, int concurrentRequests) { + super(client); + this.backoffPolicy = backoffPolicy; + assert concurrentRequests > 0; + this.listener = listener; + this.concurrentRequests = concurrentRequests; + this.semaphore = new Semaphore(concurrentRequests); + } + + @Override + public void execute(BulkRequest bulkRequest, long executionId) { + boolean bulkRequestSetupSuccessful = false; + 
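// 'acquired' is tracked separately from 'bulkRequestSetupSuccessful': the finally block below releases the semaphore permit only when the permit was acquired but the async bulk call was never issued; after a successful hand-off the ActionListener callbacks release it +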
boolean acquired = false; + try { + listener.beforeBulk(executionId, bulkRequest); + semaphore.acquire(); + acquired = true; + Retry.on(EsRejectedExecutionException.class) + .policy(backoffPolicy) + .withAsyncBackoff(client, bulkRequest, new ActionListener() { + @Override + public void onResponse(BulkResponse response) { + try { + listener.afterBulk(executionId, bulkRequest, response); + } finally { + semaphore.release(); + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.afterBulk(executionId, bulkRequest, e); + } finally { + semaphore.release(); + } + } + }); + bulkRequestSetupSuccessful = true; + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info("Bulk request {} has been cancelled.", e, executionId); + listener.afterBulk(executionId, bulkRequest, e); + } catch (Throwable t) { + logger.warn("Failed to execute bulk request {}.", t, executionId); + listener.afterBulk(executionId, bulkRequest, t); + } finally { + if (!bulkRequestSetupSuccessful && acquired) { // if we fail on client.bulk() release the semaphore + semaphore.release(); + } + } + } + + @Override + public boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { + if (semaphore.tryAcquire(this.concurrentRequests, timeout, unit)) { + semaphore.release(this.concurrentRequests); + return true; + } + return false; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/bulk/Retry.java b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java new file mode 100644 index 00000000000..477e61045ba --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -0,0 +1,237 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.*; +import java.util.function.Predicate; + +/** + * Encapsulates synchronous and asynchronous retry logic. + */ +class Retry { + private final Class retryOnThrowable; + + private BackoffPolicy backoffPolicy; + + public static Retry on(Class retryOnThrowable) { + return new Retry(retryOnThrowable); + } + + /** + * @param backoffPolicy The backoff policy that defines how long and how often to wait for retries. 
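+ * <p>Typical chained use, as in {@code BulkRequestHandler} above: {@code Retry.on(EsRejectedExecutionException.class).policy(backoffPolicy).withSyncBackoff(client, bulkRequest)}.</p>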
+ */ + public Retry policy(BackoffPolicy backoffPolicy) { + this.backoffPolicy = backoffPolicy; + return this; + } + + Retry(Class retryOnThrowable) { + this.retryOnThrowable = retryOnThrowable; + } + + /** + * Invokes #bulk(BulkRequest, ActionListener) on the provided client. Backs off on the provided exception and delegates results to the + * provided listener. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. + * @param listener A listener that is invoked when the bulk request finishes or completes with an exception. The listener is not + */ + public void withAsyncBackoff(Client client, BulkRequest bulkRequest, ActionListener listener) { + AsyncRetryHandler r = new AsyncRetryHandler(retryOnThrowable, backoffPolicy, client, listener); + r.execute(bulkRequest); + + } + + /** + * Invokes #bulk(BulkRequest) on the provided client. Backs off on the provided exception. + * + * @param client Client invoking the bulk request. + * @param bulkRequest The bulk request that should be executed. + * @return the bulk response as returned by the client. + * @throws Exception Any exception thrown by the callable. + */ + public BulkResponse withSyncBackoff(Client client, BulkRequest bulkRequest) throws Exception { + return SyncRetryHandler + .create(retryOnThrowable, backoffPolicy, client) + .executeBlocking(bulkRequest) + .actionGet(); + } + + static class AbstractRetryHandler implements ActionListener { + private final ESLogger logger; + private final Client client; + private final ActionListener listener; + private final Iterator backoff; + private final Class retryOnThrowable; + // Access only when holding a client-side lock, see also #addResponses() + private final List responses = new ArrayList<>(); + private final long startTimestampNanos; + // needed to construct the next bulk request based on the response to the previous one + // volatile as we're called from a scheduled thread + private volatile BulkRequest currentBulkRequest; + private volatile ScheduledFuture scheduledRequestFuture; + + public AbstractRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + this.retryOnThrowable = retryOnThrowable; + this.backoff = backoffPolicy.iterator(); + this.client = client; + this.listener = listener; + this.logger = Loggers.getLogger(getClass(), client.settings()); + // in contrast to System.currentTimeMillis(), nanoTime() uses a monotonic clock under the hood + this.startTimestampNanos = System.nanoTime(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + if (!bulkItemResponses.hasFailures()) { + // we're done here, include all responses + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } else { + if (canRetry(bulkItemResponses)) { + addResponses(bulkItemResponses, (r -> !r.isFailed())); + retry(createBulkRequestForRetry(bulkItemResponses)); + } else { + addResponses(bulkItemResponses, (r -> true)); + finishHim(); + } + } + } + + @Override + public void onFailure(Throwable e) { + try { + listener.onFailure(e); + } finally { + FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void retry(BulkRequest bulkRequestForRetry) { + assert backoff.hasNext(); + TimeValue next = backoff.next(); + logger.trace("Retry of bulk request scheduled in {} ms.", next.millis()); + scheduledRequestFuture = client.threadPool().schedule(next, ThreadPool.Names.SAME, (() -> this.execute(bulkRequestForRetry))); + } + + private BulkRequest 
createBulkRequestForRetry(BulkResponse bulkItemResponses) { + BulkRequest requestToReissue = new BulkRequest(); + int index = 0; + for (BulkItemResponse bulkItemResponse : bulkItemResponses.getItems()) { + if (bulkItemResponse.isFailed()) { + requestToReissue.add(currentBulkRequest.requests().get(index)); + } + index++; + } + return requestToReissue; + } + + private boolean canRetry(BulkResponse bulkItemResponses) { + if (!backoff.hasNext()) { + return false; + } + for (BulkItemResponse bulkItemResponse : bulkItemResponses) { + if (bulkItemResponse.isFailed()) { + Throwable cause = bulkItemResponse.getFailure().getCause(); + Throwable rootCause = ExceptionsHelper.unwrapCause(cause); + if (!rootCause.getClass().equals(retryOnThrowable)) { + return false; + } + } + } + return true; + } + + private void finishHim() { + try { + listener.onResponse(getAccumulatedResponse()); + } finally { + FutureUtils.cancel(scheduledRequestFuture); + } + } + + private void addResponses(BulkResponse response, Predicate filter) { + for (BulkItemResponse bulkItemResponse : response) { + if (filter.test(bulkItemResponse)) { + // Use client-side lock here to avoid visibility issues. This method may be called multiple times + // (based on how many retries we have to issue) and relying that the response handling code will be + // scheduled on the same thread is fragile. + synchronized (responses) { + responses.add(bulkItemResponse); + } + } + } + } + + private BulkResponse getAccumulatedResponse() { + BulkItemResponse[] itemResponses; + synchronized (responses) { + itemResponses = responses.toArray(new BulkItemResponse[1]); + } + long stopTimestamp = System.nanoTime(); + long totalLatencyMs = TimeValue.timeValueNanos(stopTimestamp - startTimestampNanos).millis(); + return new BulkResponse(itemResponses, totalLatencyMs); + } + + public void execute(BulkRequest bulkRequest) { + this.currentBulkRequest = bulkRequest; + client.bulk(bulkRequest, this); + } + } + + static class AsyncRetryHandler extends AbstractRetryHandler { + public AsyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, ActionListener listener) { + super(retryOnThrowable, backoffPolicy, client, listener); + } + } + + static class SyncRetryHandler extends AbstractRetryHandler { + private final PlainActionFuture actionFuture; + + public static SyncRetryHandler create(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client) { + PlainActionFuture actionFuture = PlainActionFuture.newFuture(); + return new SyncRetryHandler(retryOnThrowable, backoffPolicy, client, actionFuture); + } + + public SyncRetryHandler(Class retryOnThrowable, BackoffPolicy backoffPolicy, Client client, PlainActionFuture actionFuture) { + super(retryOnThrowable, backoffPolicy, client, actionFuture); + this.actionFuture = actionFuture; + } + + public ActionFuture executeBlocking(BulkRequest bulkRequest) { + super.execute(bulkRequest); + return actionFuture; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index 68bcdc1503d..1d29e6c3971 100644 --- a/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ b/core/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.percolate; import org.elasticsearch.ElasticsearchException; -import 
org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.IndicesRequest; @@ -37,8 +36,6 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; -import org.elasticsearch.common.text.Text; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.threadpool.ThreadPool; diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 442b0915e3b..52d45ec9407 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -473,6 +473,14 @@ public class SearchRequestBuilder extends ActionRequestBuilder + /** + * Should the query be profiled. Defaults to false + */ + public SearchRequestBuilder setProfile(boolean profile) { + sourceBuilder().profile(profile); + return this; + } + @Override public String toString() { if (request.source() != null) { diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 769e0978a71..e6681bf2b9f 100644 --- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.search; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.List; +import java.util.Map; import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure; import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse; @@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent { this.scrollId = scrollId; } + /** + * If profiling was enabled, this returns an object containing the profile results from + * each shard.
diff --git a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
index 769e0978a71..e6681bf2b9f 100644
--- a/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/search/SearchResponse.java
@@ -20,6 +20,7 @@ package org.elasticsearch.action.search;
 
 import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
@@ -32,9 +33,12 @@ import org.elasticsearch.rest.action.support.RestActions;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.internal.InternalSearchResponse;
+import org.elasticsearch.search.profile.ProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;
 
 import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 
 import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;
 import static org.elasticsearch.search.internal.InternalSearchResponse.readInternalSearchResponse;
@@ -160,6 +164,16 @@ public class SearchResponse extends ActionResponse implements StatusToXContent {
         this.scrollId = scrollId;
     }
 
+    /**
+     * If profiling was enabled, this returns an object containing the profile results from
+     * each shard. If profiling was not enabled, this will return null.
+     *
+     * @return The profile results or null
+     */
+    public @Nullable Map<String, List<ProfileShardResult>> getProfileResults() {
+        return internalResponse.profile();
+    }
+
     static final class Fields {
         static final XContentBuilderString _SCROLL_ID = new XContentBuilderString("_scroll_id");
         static final XContentBuilderString TOOK = new XContentBuilderString("took");
diff --git a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
index b73ee8a75fd..5f2fb33e043 100644
--- a/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
+++ b/core/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
@@ -21,25 +21,30 @@ package org.elasticsearch.action.support;
 
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
 
 /**
  * Helper for dealing with destructive operations and wildcard usage.
 */
-public final class DestructiveOperations extends AbstractComponent implements NodeSettingsService.Listener {
+public final class DestructiveOperations extends AbstractComponent {
 
     /**
     * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
     */
-    public static final String REQUIRES_NAME = "action.destructive_requires_name";
+    public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
+
    private volatile boolean destructiveRequiresName;

    @Inject
-    public DestructiveOperations(Settings settings, NodeSettingsService nodeSettingsService) {
+    public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        destructiveRequiresName = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, false);
-        nodeSettingsService.addListener(this);
+        destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName);
+    }
+
+    private void setDestructiveRequiresName(boolean destructiveRequiresName) {
+        this.destructiveRequiresName = destructiveRequiresName;
     }
 
     /**
@@ -65,15 +70,6 @@ public final class DestructiveOperations extends AbstractComponent implements No
         }
     }
 
-    @Override
-    public void onRefreshSettings(Settings settings) {
-        boolean newValue = settings.getAsBoolean(DestructiveOperations.REQUIRES_NAME, destructiveRequiresName);
-        if (destructiveRequiresName != newValue) {
-            logger.info("updating [action.operate_all_indices] from [{}] to [{}]", destructiveRequiresName, newValue);
-            this.destructiveRequiresName = newValue;
-        }
-    }
-
     private static boolean hasWildcardUsage(String aliasOrIndex) {
         return "_all".equals(aliasOrIndex) || aliasOrIndex.indexOf('*') != -1;
     }
diff --git a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
index 8bcba8ad544..e8f4a0d83cd 100644
--- a/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
+++ b/core/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java
@@ -19,8 +19,16 @@ package org.elasticsearch.action.support.broadcast.node;
 
-import org.elasticsearch.action.*;
-import org.elasticsearch.action.support.*;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.FailedNodeException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.NoShardAvailableActionException;
+import org.elasticsearch.action.ShardOperationFailedException;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.HandledTransportAction;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.action.support.broadcast.BroadcastRequest;
 import org.elasticsearch.action.support.broadcast.BroadcastResponse;
 import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
@@ -37,7 +45,14 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.*;
+import org.elasticsearch.transport.BaseTransportResponseHandler;
+import org.elasticsearch.transport.NodeShouldNotConnectException;
+import org.elasticsearch.transport.TransportChannel;
+import org.elasticsearch.transport.TransportException;
+import org.elasticsearch.transport.TransportRequest;
+import org.elasticsearch.transport.TransportRequestHandler;
+import org.elasticsearch.transport.TransportResponse;
+import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -394,7 +409,15 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastR
+                TransportChannelResponseHandler<TransportResponse.Empty> handler = TransportChannelResponseHandler.emptyResponseHandler(logger, channel, extraMessage);
+                transportService.sendRequest(clusterService.localNode(), transportReplicaAction, request, handler);
             }
 
     @Override
diff --git a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
index 9f8b2a2e7be..d28ba2986e2 100644
--- a/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
+++ b/core/src/main/java/org/elasticsearch/action/update/UpdateHelper.java
@@ -50,6 +50,7 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext;
 import org.elasticsearch.search.lookup.SourceLookup;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -245,7 +246,7 @@ public class UpdateHelper extends AbstractComponent {
     private Map<String, Object> executeScript(UpdateRequest request, Map<String, Object> ctx) {
         try {
             if (scriptService != null) {
-                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request);
+                ExecutableScript script = scriptService.executable(request.script, ScriptContext.Standard.UPDATE, request, Collections.emptyMap());
                 script.setNextVar("ctx", ctx);
                 script.run();
                 // we need to unwrap the ctx...
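The DestructiveOperations hunk above is the template for the settings migration running through this change: declare a typed Setting, seed the initial value from Settings, and register a consumer with ClusterSettings for dynamic updates. A minimal sketch of the same pattern for a hypothetical component (MyComponent and the key my.setting are illustrative only, not part of this change):

    import org.elasticsearch.common.component.AbstractComponent;
    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.settings.ClusterSettings;
    import org.elasticsearch.common.settings.Setting;
    import org.elasticsearch.common.settings.Settings;

    public final class MyComponent extends AbstractComponent {

        // boolSetting(key, defaultValue, dynamic, scope), same shape as REQUIRES_NAME_SETTING above
        public static final Setting<Boolean> MY_SETTING =
                Setting.boolSetting("my.setting", false, true, Setting.Scope.CLUSTER);

        private volatile boolean value;

        @Inject
        public MyComponent(Settings settings, ClusterSettings clusterSettings) {
            super(settings);
            this.value = MY_SETTING.get(settings);                              // initial value from node settings
            clusterSettings.addSettingsUpdateConsumer(MY_SETTING, this::setValue); // invoked on dynamic updates
        }

        private void setValue(boolean value) {
            this.value = value;
        }
    }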
diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
index 15def3b273e..73eed43352b 100644
--- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
+++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java
@@ -53,8 +53,8 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushRequest;
 import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
+import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
@@ -82,11 +82,14 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateResponse;
@@ -390,6 +393,29 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     */
     FlushRequestBuilder prepareFlush(String... indices);
 
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     *
+     * @param request The sync flush request
+     * @return A result future
+     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+     */
+    ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request);
+
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     *
+     * @param request The sync flush request
+     * @param listener A listener to be notified with a result
+     * @see org.elasticsearch.client.Requests#syncedFlushRequest(String...)
+     */
+    void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener);
+
+    /**
+     * Explicitly sync flush one or more indices (write sync id to shards for faster recovery).
+     */
+    SyncedFlushRequestBuilder prepareSyncedFlush(String... indices);
+
     /**
     * Explicitly force merge one or more indices into the number of segments.
     *
diff --git a/core/src/main/java/org/elasticsearch/client/Requests.java b/core/src/main/java/org/elasticsearch/client/Requests.java
index 7f0decaba52..063fd10dcfc 100644
--- a/core/src/main/java/org/elasticsearch/client/Requests.java
+++ b/core/src/main/java/org/elasticsearch/client/Requests.java
@@ -50,6 +50,7 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsRequest;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
 import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -131,7 +132,7 @@ public class Requests {
     public static SuggestRequest suggestRequest(String... indices) {
         return new SuggestRequest(indices);
     }
-
+
     /**
     * Creates a search request against one or more indices. Note, the search source must be set either using the
     * actual JSON search source, or the {@link org.elasticsearch.search.builder.SearchSourceBuilder}.
@@ -265,6 +266,17 @@ public class Requests {
         return new FlushRequest(indices);
     }
 
+    /**
+     * Creates a synced flush indices request.
+     *
+     * @param indices The indices to sync flush. Use null or _all to execute against all indices
+     * @return The synced flush request
+     * @see org.elasticsearch.client.IndicesAdminClient#syncedFlush(SyncedFlushRequest)
+     */
+    public static SyncedFlushRequest syncedFlushRequest(String... indices) {
+        return new SyncedFlushRequest(indices);
+    }
+
     /**
     * Creates a force merge request.
     *
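The new synced-flush entry points mirror the existing flush plumbing end to end. A usage sketch, assuming an already-connected Client; the index name "logs" is a placeholder:

    // via the request builder added to IndicesAdminClient above
    SyncedFlushResponse viaBuilder = client.admin().indices().prepareSyncedFlush("logs").get();
    // or via the Requests helper, going through the ActionFuture variant
    SyncedFlushResponse viaRequest =
            client.admin().indices().syncedFlush(Requests.syncedFlushRequest("logs")).actionGet();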
diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
index 1b5e8539ac6..ea57901f2b3 100644
--- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
+++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java
@@ -188,6 +188,10 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushAction;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequestBuilder;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder;
@@ -1315,6 +1319,21 @@ public abstract class AbstractClient extends AbstractComponent implements Client
         return new FlushRequestBuilder(this, FlushAction.INSTANCE).setIndices(indices);
     }
 
+    @Override
+    public ActionFuture<SyncedFlushResponse> syncedFlush(SyncedFlushRequest request) {
+        return execute(SyncedFlushAction.INSTANCE, request);
+    }
+
+    @Override
+    public void syncedFlush(SyncedFlushRequest request, ActionListener<SyncedFlushResponse> listener) {
+        execute(SyncedFlushAction.INSTANCE, request, listener);
+    }
+
+    @Override
+    public SyncedFlushRequestBuilder prepareSyncedFlush(String... indices) {
+        return new SyncedFlushRequestBuilder(this, SyncedFlushAction.INSTANCE).setIndices(indices);
+    }
+
     @Override
     public void getMappings(GetMappingsRequest request, ActionListener<GetMappingsResponse> listener) {
         execute(GetMappingsAction.INSTANCE, request, listener);
diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
index 33cf3479419..3b8be668f43 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
+++ b/core/src/main/java/org/elasticsearch/client/transport/TransportClient.java
@@ -32,7 +32,6 @@ import org.elasticsearch.client.support.Headers;
 import org.elasticsearch.client.transport.support.TransportProxyClient;
 import org.elasticsearch.cluster.ClusterNameModule;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.LifecycleComponent;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.inject.Module;
@@ -43,19 +42,15 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.common.transport.TransportAddress;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.EnvironmentModule;
 import org.elasticsearch.indices.breaker.CircuitBreakerModule;
 import org.elasticsearch.monitor.MonitorService;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.plugins.PluginsModule;
 import org.elasticsearch.plugins.PluginsService;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.threadpool.ThreadPoolModule;
-import org.elasticsearch.transport.TransportModule;
 import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.transport.netty.NettyTransport;
 
@@ -69,7 +64,7 @@ import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 * The transport client allows to create a client that is not part of the cluster, but simply connects to one
 * or more nodes directly by adding their respective addresses using {@link #addTransportAddress(org.elasticsearch.common.transport.TransportAddress)}.
 * <p>

- * The transport client important modules used is the {@link org.elasticsearch.transport.TransportModule} which is + * The transport client important modules used is the {@link org.elasticsearch.common.network.NetworkModule} which is * started in client mode (only connects, no bind). */ public class TransportClient extends AbstractClient { @@ -143,10 +138,9 @@ public class TransportClient extends AbstractClient { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter )); - modules.add(new NetworkModule(networkService)); + modules.add(new NetworkModule(networkService, this.settings, true)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); - modules.add(new TransportModule(this.settings)); modules.add(new SearchModule() { @Override protected void configure() { @@ -154,7 +148,6 @@ public class TransportClient extends AbstractClient { } }); modules.add(new ActionModule(true)); - modules.add(new ClientTransportModule()); modules.add(new CircuitBreakerModule(this.settings)); pluginsService.processModules(modules); diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 2b7786fdb1d..43c616d799a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -19,9 +19,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.replication.TransportReplicationAction; import org.elasticsearch.cluster.action.index.MappingUpdatedAction; import org.elasticsearch.cluster.action.index.NodeIndexDeletedAction; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; @@ -29,7 +26,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateFilter; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService; @@ -60,17 +56,15 @@ import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocatio import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.elasticsearch.cluster.service.InternalClusterService; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.ExtensionPoint; -import org.elasticsearch.discovery.DiscoverySettings; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.elect.ElectMasterService; 
import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.PrimaryShardAllocator; import org.elasticsearch.index.engine.EngineConfig; @@ -81,21 +75,13 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.IndicesWarmer; -import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.indices.ttl.IndicesTTLService; -import org.elasticsearch.search.SearchService; import org.elasticsearch.search.internal.DefaultSearchContext; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; +import java.util.*; /** * Configures classes and services that affect the entire cluster. @@ -122,7 +108,6 @@ public class ClusterModule extends AbstractModule { SnapshotInProgressAllocationDecider.class)); private final Settings settings; - private final DynamicSettings.Builder clusterDynamicSettings = new DynamicSettings.Builder(); private final DynamicSettings.Builder indexDynamicSettings = new DynamicSettings.Builder(); private final ExtensionPoint.SelectedType shardsAllocators = new ExtensionPoint.SelectedType<>("shards_allocator", ShardsAllocator.class); private final ExtensionPoint.ClassSet allocationDeciders = new ExtensionPoint.ClassSet<>("allocation_decider", AllocationDecider.class, AllocationDeciders.class); @@ -134,7 +119,6 @@ public class ClusterModule extends AbstractModule { public ClusterModule(Settings settings) { this.settings = settings; - registerBuiltinClusterSettings(); registerBuiltinIndexSettings(); for (Class decider : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) { @@ -144,70 +128,11 @@ public class ClusterModule extends AbstractModule { registerShardsAllocator(ClusterModule.EVEN_SHARD_COUNT_ALLOCATOR, BalancedShardsAllocator.class); } - private void registerBuiltinClusterSettings() { - registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, Validator.EMPTY); - registerClusterDynamicSetting(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, Validator.FLOAT); - registerClusterDynamicSetting(BalancedShardsAllocator.SETTING_THRESHOLD, Validator.NON_NEGATIVE_FLOAT); - registerClusterDynamicSetting(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ALLOCATION_ALLOW_REBALANCE_VALIDATOR); - registerClusterDynamicSetting(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, Validator.INTEGER); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, Validator.EMPTY); - registerClusterDynamicSetting(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, Validator.BOOLEAN); - 
registerClusterDynamicSetting(DiscoverySettings.NO_MASTER_BLOCK, Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerClusterDynamicSetting(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(IndicesTTLService.INDICES_TTL_INTERVAL, Validator.TIME); - registerClusterDynamicSetting(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, Validator.TIME); - registerClusterDynamicSetting(MetaData.SETTING_READ_ONLY, Validator.EMPTY); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, Validator.POSITIVE_INTEGER); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(ThreadPool.THREADPOOL_GROUP + "*", ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, Validator.INTEGER); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, Validator.EMPTY); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, Validator.EMPTY); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, Validator.BOOLEAN); - registerClusterDynamicSetting(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, Validator.EMPTY); - registerClusterDynamicSetting(DestructiveOperations.REQUIRES_NAME, Validator.EMPTY); - registerClusterDynamicSetting(DiscoverySettings.PUBLISH_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(DiscoverySettings.PUBLISH_DIFF_ENABLE, Validator.BOOLEAN); - 
registerClusterDynamicSetting(DiscoverySettings.COMMIT_TIMEOUT, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, Validator.MEMORY_SIZE); - registerClusterDynamicSetting(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, Validator.NON_NEGATIVE_DOUBLE); - registerClusterDynamicSetting(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, Validator.TIME_NON_NEGATIVE); - registerClusterDynamicSetting(SearchService.DEFAULT_SEARCH_TIMEOUT, Validator.TIMEOUT); - registerClusterDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_INCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE, Validator.EMPTY); - registerClusterDynamicSetting(TransportService.SETTING_TRACE_LOG_EXCLUDE + ".*", Validator.EMPTY); - registerClusterDynamicSetting(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, Validator.BOOLEAN); - registerClusterDynamicSetting(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, Validator.INTEGER); - registerClusterDynamicSetting(TransportReplicationAction.SHARD_FAILURE_TIMEOUT, Validator.TIME_NON_NEGATIVE); - } private void registerBuiltinIndexSettings() { registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); registerIndexDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE, Validator.EMPTY); - registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.EMPTY); + registerIndexDynamicSetting(MergeSchedulerConfig.MAX_THREAD_COUNT, Validator.NON_NEGATIVE_INTEGER); registerIndexDynamicSetting(MergeSchedulerConfig.MAX_MERGE_COUNT, Validator.EMPTY); registerIndexDynamicSetting(MergeSchedulerConfig.AUTO_THROTTLE, Validator.EMPTY); registerIndexDynamicSetting(FilterAllocationDecider.INDEX_ROUTING_REQUIRE_GROUP + "*", Validator.EMPTY); @@ -272,9 +197,6 @@ public class ClusterModule extends AbstractModule { indexDynamicSettings.addSetting(setting, validator); } - public void registerClusterDynamicSetting(String setting, Validator validator) { - clusterDynamicSettings.addSetting(setting, validator); - } public void registerAllocationDecider(Class allocationDecider) { allocationDeciders.registerExtension(allocationDecider); @@ -290,7 +212,6 @@ public class ClusterModule extends AbstractModule { @Override protected void configure() { - bind(DynamicSettings.class).annotatedWith(ClusterDynamicSettings.class).toInstance(clusterDynamicSettings.build()); bind(DynamicSettings.class).annotatedWith(IndexDynamicSettings.class).toInstance(indexDynamicSettings.build()); // bind ShardsAllocator diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java index e20f21b4cec..34ccfd3b433 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -129,7 +129,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         @SuppressWarnings("unchecked")
         T proto = (T) customPrototypes.get(type);
         if (proto == null) {
-            throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]");
+            throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "], node likely missing plugins");
         }
         return proto;
     }
diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
index ab85d9540f0..fb22c2ca368 100644
--- a/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
+++ b/core/src/main/java/org/elasticsearch/cluster/ClusterStateTaskExecutor.java
@@ -37,6 +37,13 @@ public interface ClusterStateTaskExecutor<T> {
         return true;
     }
 
+    /**
+     * Callback invoked after new cluster state is published. Note that
+     * this method is not invoked if the cluster state was not updated.
+     */
+    default void clusterStatePublished(ClusterState newClusterState) {
+    }
+
     /**
     * Represents the result of a batched execution of cluster state update tasks
     * @param <T> the type of the cluster state update task
diff --git a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 039868d16c4..925a5a12ed6 100644
--- a/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -37,11 +37,12 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.ESLogger;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.monitor.fs.FsInfo;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.ReceiveTimeoutTransportException;
 
@@ -63,8 +64,8 @@ import java.util.concurrent.TimeUnit;
 */
 public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
 
-    public static final String INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL = "cluster.info.update.interval";
-    public static final String INTERNAL_CLUSTER_INFO_TIMEOUT = "cluster.info.update.timeout";
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
 
     private volatile TimeValue updateFrequency;
 
@@ -82,7 +83,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
     private final List<Listener> listeners = new CopyOnWriteArrayList<>();
 
     @Inject
-    public InternalClusterInfoService(Settings settings,
NodeSettingsService nodeSettingsService, + public InternalClusterInfoService(Settings settings, ClusterSettings clusterSettings, TransportNodesStatsAction transportNodesStatsAction, TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService, ThreadPool threadPool) { @@ -95,10 +96,12 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.transportIndicesStatsAction = transportIndicesStatsAction; this.clusterService = clusterService; this.threadPool = threadPool; - this.updateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, TimeValue.timeValueSeconds(30)); - this.fetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, TimeValue.timeValueSeconds(15)); - this.enabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true); - nodeSettingsService.addListener(new ApplySettings()); + this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); + this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); + this.enabled = DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); + clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); + clusterSettings.addSettingsUpdateConsumer(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.add((LocalNodeMasterListener)this); @@ -106,35 +109,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu this.clusterService.add((ClusterStateListener)this); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newUpdateFrequency = settings.getAsTime(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, null); - // ClusterInfoService is only enabled if the DiskThresholdDecider is enabled - Boolean newEnabled = settings.getAsBoolean(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null); + private void setEnabled(boolean enabled) { + this.enabled = enabled; + } - if (newUpdateFrequency != null) { - if (newUpdateFrequency.getMillis() < TimeValue.timeValueSeconds(10).getMillis()) { - logger.warn("[{}] set too low [{}] (< 10s)", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, newUpdateFrequency); - throw new IllegalStateException("Unable to set " + INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL + " less than 10 seconds"); - } else { - logger.info("updating [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, updateFrequency, newUpdateFrequency); - InternalClusterInfoService.this.updateFrequency = newUpdateFrequency; - } - } + private void setFetchTimeout(TimeValue fetchTimeout) { + this.fetchTimeout = fetchTimeout; + } - TimeValue newFetchTimeout = settings.getAsTime(INTERNAL_CLUSTER_INFO_TIMEOUT, null); - if (newFetchTimeout != null) { - logger.info("updating fetch timeout [{}] from [{}] to [{}]", INTERNAL_CLUSTER_INFO_TIMEOUT, fetchTimeout, newFetchTimeout); - InternalClusterInfoService.this.fetchTimeout = newFetchTimeout; - } - - - // We don't log about enabling it here, because the DiskThresholdDecider will already be logging about enable/disable - if (newEnabled != null) { - InternalClusterInfoService.this.enabled = newEnabled; - } - } + void 
setUpdateFrequency(TimeValue updateFrequency) { + this.updateFrequency = updateFrequency; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index e3925aa6f4e..9e57fe3a48a 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -26,11 +26,12 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.concurrent.TimeoutException; @@ -40,30 +41,23 @@ import java.util.concurrent.TimeoutException; */ public class MappingUpdatedAction extends AbstractComponent { - public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout"; + public static final Setting INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue current = MappingUpdatedAction.this.dynamicMappingUpdateTimeout; - TimeValue newValue = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, current); - if (!current.equals(newValue)) { - logger.info("updating " + INDICES_MAPPING_DYNAMIC_TIMEOUT + " from [{}] to [{}]", current, newValue); - MappingUpdatedAction.this.dynamicMappingUpdateTimeout = newValue; - } - } + @Inject + public MappingUpdatedAction(Settings settings, ClusterSettings clusterSettings) { + super(settings); + this.dynamicMappingUpdateTimeout = INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, this::setDynamicMappingUpdateTimeout); } - @Inject - public MappingUpdatedAction(Settings settings, NodeSettingsService nodeSettingsService) { - super(settings); - this.dynamicMappingUpdateTimeout = settings.getAsTime(INDICES_MAPPING_DYNAMIC_TIMEOUT, TimeValue.timeValueSeconds(30)); - nodeSettingsService.addListener(new ApplySettings()); + private void setDynamicMappingUpdateTimeout(TimeValue dynamicMappingUpdateTimeout) { + this.dynamicMappingUpdateTimeout = dynamicMappingUpdateTimeout; } + public void setClient(Client client) { this.client = client.admin().indices(); } diff --git a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index d09df094a68..a04a6d7bd51 100644 --- a/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/core/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -20,7 +20,12 @@ package org.elasticsearch.cluster.action.shard; import org.elasticsearch.ExceptionsHelper; -import 
org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingService; @@ -37,19 +42,24 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry; -/** - * - */ -public class ShardStateAction extends AbstractComponent { +public class ShardStateAction extends AbstractComponent { public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure"; @@ -97,18 +107,101 @@ public class ShardStateAction extends AbstractComponent { options = TransportRequestOptions.builder().withTimeout(timeout).build(); } transportService.sendRequest(masterNode, - SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + SHARD_FAILED_ACTION_NAME, shardRoutingEntry, options, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty response) { + listener.onSuccess(); + } + + @Override + public void handleException(TransportException exp) { + logger.warn("unexpected failure while sending request to [{}] to fail shard [{}]", exp, masterNode, shardRoutingEntry); + listener.onShardFailedFailure(masterNode, exp); + } + }); + } + + private class ShardFailedTransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardFailureOnMaster(request, new ClusterStateTaskListener() { @Override - public void handleResponse(TransportResponse.Empty response) { - listener.onSuccess(); + public void onFailure(String source, Throwable t) { + logger.error("unexpected failure while failing shard [{}]", t, request.shardRouting); + try { + channel.sendResponse(t); + } catch (Throwable channelThrowable) { + logger.warn("failed to send failure [{}] while failing shard [{}]", channelThrowable, t, request.shardRouting); + } } @Override - public void handleException(TransportException exp) { - logger.warn("failed to send failed shard to {}", exp, masterNode); - listener.onShardFailedFailure(masterNode, exp); + public void onNoLongerMaster(String source) { + logger.error("no longer master while failing shard [{}]", 
request.shardRouting); + try { + channel.sendResponse(new NotMasterException(source)); + } catch (Throwable channelThrowable) { + logger.warn("failed to send no longer master while failing shard [{}]", channelThrowable, request.shardRouting); + } } - }); + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (Throwable channelThrowable) { + logger.warn("failed to send response while failing shard [{}]", channelThrowable, request.shardRouting); + } + } + } + ); + } + } + + class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor { + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + BatchResult.Builder batchResultBuilder = BatchResult.builder(); + List failedShards = new ArrayList<>(tasks.size()); + for (ShardRoutingEntry task : tasks) { + failedShards.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); + } + ClusterState maybeUpdatedState = currentState; + try { + RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, failedShards); + if (result.changed()) { + maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); + } + batchResultBuilder.successes(tasks); + } catch (Throwable t) { + batchResultBuilder.failures(tasks, t); + } + return batchResultBuilder.build(maybeUpdatedState); + } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + int numberOfUnassignedShards = newClusterState.getRoutingNodes().unassigned().size(); + if (numberOfUnassignedShards > 0) { + String reason = String.format(Locale.ROOT, "[%d] unassigned shards after failing shards", numberOfUnassignedShards); + if (logger.isTraceEnabled()) { + logger.trace(reason + ", scheduling a reroute"); + } + routingService.reroute(reason); + } + } + } + + private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); + + private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry, ClusterStateTaskListener listener) { + logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); + clusterService.submitStateUpdateTask( + "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.HIGH), + shardFailedClusterStateHandler, + listener); } public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { @@ -124,74 +217,20 @@ public class ShardStateAction extends AbstractComponent { ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason, null); logger.debug("{} sending shard started for {}", shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); transportService.sendRequest(masterNode, - SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleException(TransportException exp) { - logger.warn("failed to send shard started to [{}]", exp, masterNode); - } - - }); - } - - private final ShardFailedClusterStateHandler shardFailedClusterStateHandler = new ShardFailedClusterStateHandler(); - - private void handleShardFailureOnMaster(final ShardRoutingEntry shardRoutingEntry) { - 
logger.warn("{} received shard failed for {}", shardRoutingEntry.failure, shardRoutingEntry.shardRouting.shardId(), shardRoutingEntry); - clusterService.submitStateUpdateTask( - "shard-failed (" + shardRoutingEntry.shardRouting + "), message [" + shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.HIGH), - shardFailedClusterStateHandler, - shardFailedClusterStateHandler); - } - - class ShardFailedClusterStateHandler implements ClusterStateTaskExecutor, ClusterStateTaskListener { - @Override - public BatchResult execute(ClusterState currentState, List tasks) throws Exception { - BatchResult.Builder batchResultBuilder = BatchResult.builder(); - List shardRoutingsToBeApplied = new ArrayList<>(tasks.size()); - for (ShardRoutingEntry task : tasks) { - shardRoutingsToBeApplied.add(new FailedRerouteAllocation.FailedShard(task.shardRouting, task.message, task.failure)); - } - ClusterState maybeUpdatedState = currentState; - try { - RoutingAllocation.Result result = allocationService.applyFailedShards(currentState, shardRoutingsToBeApplied); - if (result.changed()) { - maybeUpdatedState = ClusterState.builder(currentState).routingResult(result).build(); + SHARD_STARTED_ACTION_NAME, new ShardRoutingEntry(shardRouting, indexUUID, reason, null), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleException(TransportException exp) { + logger.warn("failed to send shard started to [{}]", exp, masterNode); } - batchResultBuilder.successes(tasks); - } catch (Throwable t) { - batchResultBuilder.failures(tasks, t); - } - return batchResultBuilder.build(maybeUpdatedState); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (oldState != newState && newState.getRoutingNodes().unassigned().size() > 0) { - logger.trace("unassigned shards after shard failures. 
scheduling a reroute."); - routingService.reroute("unassigned shards after shard failures, scheduling a reroute"); - } - } - - @Override - public void onFailure(String source, Throwable t) { - logger.error("unexpected failure during [{}]", t, source); - } + }); } - private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = - new ShardStartedClusterStateHandler(); - - private void shardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { - logger.debug("received shard started for {}", shardRoutingEntry); - - clusterService.submitStateUpdateTask( - "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", - shardRoutingEntry, - ClusterStateTaskConfig.build(Priority.URGENT), - shardStartedClusterStateHandler, - shardStartedClusterStateHandler); + class ShardStartedTransportHandler implements TransportRequestHandler { + @Override + public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { + handleShardStartedOnMaster(request); + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } } class ShardStartedClusterStateHandler implements ClusterStateTaskExecutor, ClusterStateTaskListener { @@ -223,26 +262,20 @@ public class ShardStateAction extends AbstractComponent { } } - private class ShardFailedTransportHandler implements TransportRequestHandler { + private final ShardStartedClusterStateHandler shardStartedClusterStateHandler = new ShardStartedClusterStateHandler(); - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - handleShardFailureOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } + private void handleShardStartedOnMaster(final ShardRoutingEntry shardRoutingEntry) { + logger.debug("received shard started for {}", shardRoutingEntry); - class ShardStartedTransportHandler implements TransportRequestHandler { - - @Override - public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { - shardStartedOnMaster(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } + clusterService.submitStateUpdateTask( + "shard-started (" + shardRoutingEntry.shardRouting + "), reason [" + shardRoutingEntry.message + "]", + shardRoutingEntry, + ClusterStateTaskConfig.build(Priority.URGENT), + shardStartedClusterStateHandler, + shardStartedClusterStateHandler); } public static class ShardRoutingEntry extends TransportRequest { - ShardRouting shardRouting; String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; String message; @@ -283,8 +316,13 @@ public class ShardStateAction extends AbstractComponent { } public interface Listener { - default void onSuccess() {} - default void onShardFailedNoMaster() {} - default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) {} + default void onSuccess() { + } + + default void onShardFailedNoMaster() { + } + + default void onShardFailedFailure(final DiscoveryNode master, final TransportException e) { + } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 669d71477ca..93961bf1fbb 100644 --- a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -621,7 +621,7 @@ public class IndexMetaData implements Diffable, FromXContentBuild public int numberOfReplicas() { 
return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
         }
-
+
         public Builder creationDate(long creationDate) {
             settings = settingsBuilder().put(settings).put(SETTING_CREATION_DATE, creationDate).build();
             return this;
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
index 751f8a09ea5..55cb8a5d944 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java
@@ -40,8 +40,8 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.common.logging.support.LoggerMessageFormat;
 import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.loader.SettingsLoader;
 import org.elasticsearch.common.xcontent.FromXContentBuilder;
@@ -134,13 +134,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
         //noinspection unchecked
         T proto = (T) customPrototypes.get(type);
         if (proto == null) {
-            throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]");
+            throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "], node likely missing plugins");
         }
         return proto;
     }
 
-    public static final String SETTING_READ_ONLY = "cluster.blocks.read_only";
+    public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);
 
     public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
@@ -745,23 +745,23 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
 
     /** All known byte-sized cluster settings. */
     public static final Set<String> CLUSTER_BYTES_SIZE_SETTINGS = unmodifiableSet(newHashSet(
-        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC,
-        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC));
+        IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()));
 
     /** All known time cluster settings. */
     public static final Set<String> CLUSTER_TIME_SETTINGS = unmodifiableSet(newHashSet(
-        IndicesTTLService.INDICES_TTL_INTERVAL,
-        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC,
-        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK,
-        RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT,
-        RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT,
-        RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT,
-        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL,
-        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL,
-        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT,
-        DiscoverySettings.PUBLISH_TIMEOUT,
-        InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD));
+        IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(),
+        RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(),
+        DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(),
+        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(),
+        InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(),
+        DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(),
+        InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey()));
 
     /** As of 2.0 we require units for time and byte-sized settings. This method adds default units to any cluster settings that don't
     * specify a unit. */
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
index 1fa1b702f66..b38e99d4493 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java
@@ -47,7 +47,6 @@ import org.elasticsearch.rest.RestStatus;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Locale;
 
 /**
 * Service responsible for submitting open/close index requests
@@ -92,14 +91,6 @@ public class MetaDataIndexStateService extends AbstractComponent {
             }
 
             if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
-                IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
-                for (IndexShardRoutingTable shard : indexRoutingTable) {
-                    for (ShardRouting shardRouting : shard) {
-                        if (shardRouting.primary() == true && shardRouting.allocatedPostIndexCreate() == false) {
-                            throw new IndexPrimaryShardNotAllocatedException(new Index(index));
-                        }
-                    }
-                }
                 indicesToClose.add(index);
             }
         }
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
index 957125703b6..8093d93ccce 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java
@@ -37,7 +37,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import
org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; import org.elasticsearch.percolator.PercolatorService; @@ -237,8 +236,8 @@ public class MetaDataMappingService extends AbstractComponent { } private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException { - Map newMappers = new HashMap<>(); - Map existingMappers = new HashMap<>(); + String mappingType = request.type(); + CompressedXContent mappingUpdateSource = new CompressedXContent(request.source()); for (String index : request.indices()) { IndexService indexService = indicesService.indexServiceSafe(index); // try and parse it (no need to add it here) so we can bail early in case of parsing exception @@ -246,16 +245,13 @@ public class MetaDataMappingService extends AbstractComponent { DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type()); if (MapperService.DEFAULT_MAPPING.equals(request.type())) { // _default_ types do not go through merging, but we do test the new settings. Also don't apply the old default - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), false); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, false); } else { - newMapper = indexService.mapperService().parse(request.type(), new CompressedXContent(request.source()), existingMapper == null); + newMapper = indexService.mapperService().parse(request.type(), mappingUpdateSource, existingMapper == null); if (existingMapper != null) { // first, simulate - MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); - // if we have conflicts, throw an exception - if (mergeResult.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(mergeResult.buildConflicts()) + "}"); - } + // this will just throw exceptions in case of problems + existingMapper.merge(newMapper.mapping(), true, request.updateAllTypes()); } else { // TODO: can we find a better place for this validation? 
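The MetaDataMappingService hunk above drops the MergeResult round trip: a merge now throws on conflict, so the simulate pass is a plain call that either returns or fails. A rough sketch of that contract, with plain maps standing in for mappings (this is not the real DocumentMapper/MapperService API):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

final class MappingMerger {
    // merge an update into the existing mapping; conflicting redefinitions throw
    static Map<String, String> merge(Map<String, String> existing, Map<String, String> update, boolean simulate) {
        List<String> conflicts = update.entrySet().stream()
                .filter(e -> existing.containsKey(e.getKey()) && !existing.get(e.getKey()).equals(e.getValue()))
                .map(e -> "mapper [" + e.getKey() + "] cannot be changed")
                .collect(Collectors.toList());
        if (conflicts.isEmpty() == false) {
            throw new IllegalArgumentException("Merge failed with failures " + conflicts);
        }
        if (simulate) {
            return existing;           // a simulate pass only checks; nothing is applied
        }
        Map<String, String> merged = new HashMap<>(existing);
        merged.putAll(update);
        return merged;
    }

    public static void main(String[] args) {
        Map<String, String> existing = Map.of("title", "text");
        merge(existing, Map.of("views", "long"), true);   // simulate passes: no conflict
        try {
            merge(existing, Map.of("title", "long"), true);
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());           // conflicts surface as exceptions
        }
    }
}
```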
// The reason this validation is here is that the mapper service doesn't learn about @@ -274,36 +270,31 @@ public class MetaDataMappingService extends AbstractComponent { } } } - newMappers.put(index, newMapper); - if (existingMapper != null) { - existingMappers.put(index, existingMapper); + if (mappingType == null) { + mappingType = newMapper.type(); + } else if (mappingType.equals(newMapper.type()) == false) { + throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); } } + assert mappingType != null; - String mappingType = request.type(); - if (mappingType == null) { - mappingType = newMappers.values().iterator().next().type(); - } else if (!mappingType.equals(newMappers.values().iterator().next().type())) { - throw new InvalidTypeNameException("Type name provided does not match type name within mapping definition"); - } if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && !PercolatorService.TYPE_NAME.equals(mappingType) && mappingType.charAt(0) == '_') { throw new InvalidTypeNameException("Document mapping type name can't start with '_'"); } final Map mappings = new HashMap<>(); - for (Map.Entry entry : newMappers.entrySet()) { - String index = entry.getKey(); + for (String index : request.indices()) { // do the actual merge here on the master, and update the mapping source - DocumentMapper newMapper = entry.getValue(); IndexService indexService = indicesService.indexService(index); if (indexService == null) { continue; } CompressedXContent existingSource = null; - if (existingMappers.containsKey(entry.getKey())) { - existingSource = existingMappers.get(entry.getKey()).mappingSource(); + DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType); + if (existingMapper != null) { + existingSource = existingMapper.mappingSource(); } - DocumentMapper mergedMapper = indexService.mapperService().merge(newMapper.type(), newMapper.mappingSource(), false, request.updateAllTypes()); + DocumentMapper mergedMapper = indexService.mapperService().merge(mappingType, mappingUpdateSource, true, request.updateAllTypes()); CompressedXContent updatedSource = mergedMapper.mappingSource(); if (existingSource != null) { @@ -322,9 +313,9 @@ public class MetaDataMappingService extends AbstractComponent { } else { mappings.put(index, new MappingMetaData(mergedMapper)); if (logger.isDebugEnabled()) { - logger.debug("[{}] create_mapping [{}] with source [{}]", index, newMapper.type(), updatedSource); + logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource); } else if (logger.isInfoEnabled()) { - logger.info("[{}] create_mapping [{}]", index, newMapper.type()); + logger.info("[{}] create_mapping [{}]", index, mappingType); } } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index badf70a191e..8dd980c8bb3 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -21,12 +21,12 @@ package org.elasticsearch.cluster.routing; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; - import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import 
org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; @@ -34,7 +34,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -85,7 +84,7 @@ public class RoutingNodes implements Iterable { Map> nodesToShards = new HashMap<>(); // fill in the nodeToShards with the "live" nodes for (ObjectCursor cursor : clusterState.nodes().dataNodes().values()) { - nodesToShards.put(cursor.value.id(), new ArrayList()); + nodesToShards.put(cursor.value.id(), new ArrayList<>()); } // fill in the inverse of node -> shards allocated @@ -98,21 +97,13 @@ public class RoutingNodes implements Iterable { // by the ShardId, as this is common for primary and replicas. // A replica Set might have one (and not more) replicas with the state of RELOCATING. if (shard.assignedToNode()) { - List entries = nodesToShards.get(shard.currentNodeId()); - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.currentNodeId(), entries); - } + List entries = nodesToShards.computeIfAbsent(shard.currentNodeId(), k -> new ArrayList<>()); final ShardRouting sr = getRouting(shard, readOnly); entries.add(sr); assignedShardsAdd(sr); if (shard.relocating()) { - entries = nodesToShards.get(shard.relocatingNodeId()); relocatingShards++; - if (entries == null) { - entries = new ArrayList<>(); - nodesToShards.put(shard.relocatingNodeId(), entries); - } + entries = nodesToShards.computeIfAbsent(shard.relocatingNodeId(), k -> new ArrayList<>()); // add the counterpart shard with relocatingNodeId reflecting the source from which // it's relocating from. 
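The RoutingNodes hunks above replace the get/null-check/put dance with Map.computeIfAbsent, a plain JDK 8 API. Both forms below are equivalent; the mapping function only runs when the key is absent:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ComputeIfAbsentDemo {
    public static void main(String[] args) {
        Map<String, List<String>> nodesToShards = new HashMap<>();

        // before: three statements per lookup
        List<String> entries = nodesToShards.get("node-1");
        if (entries == null) {
            entries = new ArrayList<>();
            nodesToShards.put("node-1", entries);
        }
        entries.add("shard-0");

        // after: one expression; new ArrayList<>() is only evaluated on a miss
        nodesToShards.computeIfAbsent("node-2", k -> new ArrayList<>()).add("shard-1");

        System.out.println(nodesToShards);
    }
}
```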
ShardRouting targetShardRouting = shard.buildTargetRelocatingShard(); @@ -128,7 +119,7 @@ public class RoutingNodes implements Iterable { inactiveShardCount++; } } else { - final ShardRouting sr = getRouting(shard, readOnly); + final ShardRouting sr = getRouting(shard, readOnly); assignedShardsAdd(sr); unassignedShards.add(sr); } @@ -456,12 +447,8 @@ public class RoutingNodes implements Iterable { // no unassigned return; } - List shards = assignedShards.get(shard.shardId()); - if (shards == null) { - shards = new ArrayList<>(); - assignedShards.put(shard.shardId(), shards); - } - assert assertInstanceNotInList(shard, shards); + List shards = assignedShards.computeIfAbsent(shard.shardId(), k -> new ArrayList<>()); + assert assertInstanceNotInList(shard, shards); shards.add(shard); } @@ -671,7 +658,7 @@ public class RoutingNodes implements Iterable { } public void shuffle() { - Collections.shuffle(unassigned); + Randomness.shuffle(unassigned); } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 8dd71e3fba5..5ffaee0f2f9 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -267,7 +269,7 @@ public final class ShardRouting implements Streamable, ToXContent { return shardIdentifier; } - public boolean allocatedPostIndexCreate() { + public boolean allocatedPostIndexCreate(IndexMetaData indexMetaData) { if (active()) { return true; } @@ -279,6 +281,11 @@ public final class ShardRouting implements Streamable, ToXContent { return false; } + if (indexMetaData.activeAllocationIds(id()).isEmpty() && indexMetaData.getCreationVersion().onOrAfter(Version.V_3_0_0)) { + // when no shards with this id have ever been active for this index + return false; + } + return true; } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index feafb76a5f2..2268bf1d995 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -39,15 +39,12 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.Set; +import java.util.*; import java.util.function.Function; import java.util.stream.Collectors; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java 
b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index b9ce532a611..e6dc9a65efd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -34,12 +34,13 @@ import org.elasticsearch.cluster.routing.allocation.StartedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.PriorityComparator; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.ArrayList; import java.util.Collection; @@ -72,42 +73,32 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; */ public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { - public static final String SETTING_THRESHOLD = "cluster.routing.allocation.balance.threshold"; - public static final String SETTING_INDEX_BALANCE_FACTOR = "cluster.routing.allocation.balance.index"; - public static final String SETTING_SHARD_BALANCE_FACTOR = "cluster.routing.allocation.balance.shard"; - - private static final float DEFAULT_INDEX_BALANCE_FACTOR = 0.55f; - private static final float DEFAULT_SHARD_BALANCE_FACTOR = 0.45f; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final float indexBalance = settings.getAsFloat(SETTING_INDEX_BALANCE_FACTOR, weightFunction.indexBalance); - final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance); - float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold); - if (threshold <= 0.0f) { - throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); - } - BalancedShardsAllocator.this.threshold = threshold; - BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance); - } - } - - private volatile WeightFunction weightFunction = new WeightFunction(DEFAULT_INDEX_BALANCE_FACTOR, DEFAULT_SHARD_BALANCE_FACTOR); - - private volatile float threshold = 1.0f; + public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER); + public static final Setting SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER); + public static final Setting THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER); + private volatile WeightFunction weightFunction; + private volatile float threshold; public BalancedShardsAllocator(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public BalancedShardsAllocator(Settings settings, 
NodeSettingsService nodeSettingsService) { + public BalancedShardsAllocator(Settings settings, ClusterSettings clusterSettings) { super(settings); - ApplySettings applySettings = new ApplySettings(); - applySettings.onRefreshSettings(settings); - nodeSettingsService.addListener(applySettings); + setWeightFunction(INDEX_BALANCE_FACTOR_SETTING.get(settings), SHARD_BALANCE_FACTOR_SETTING.get(settings)); + setThreshold(THRESHOLD_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(INDEX_BALANCE_FACTOR_SETTING, SHARD_BALANCE_FACTOR_SETTING, this::setWeightFunction); + clusterSettings.addSettingsUpdateConsumer(THRESHOLD_SETTING, this::setThreshold); + } + + private void setWeightFunction(float indexBalance, float shardBalanceFactor) { + weightFunction = new WeightFunction(indexBalance, shardBalanceFactor); + } + + private void setThreshold(float threshold) { + this.threshold = threshold; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index 6f7bbac8aea..a66c8ddaef7 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -24,10 +24,11 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.HashMap; import java.util.Map; @@ -76,37 +77,12 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES = "cluster.routing.allocation.awareness.attributes"; - public static final String CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP = "cluster.routing.allocation.awareness.force."; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String[] awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null); - if (awarenessAttributes == null && "".equals(settings.get(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, null))) { - awarenessAttributes = Strings.EMPTY_ARRAY; // the empty string resets this - } - if (awarenessAttributes != null) { - logger.info("updating [cluster.routing.allocation.awareness.attributes] from [{}] to [{}]", AwarenessAllocationDecider.this.awarenessAttributes, awarenessAttributes); - AwarenessAllocationDecider.this.awarenessAttributes = awarenessAttributes; - } - Map forcedAwarenessAttributes = new HashMap<>(AwarenessAllocationDecider.this.forcedAwarenessAttributes); - Map forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); - if (!forceGroups.isEmpty()) { - for (Map.Entry entry : forceGroups.entrySet()) { - String[] aValues = entry.getValue().getAsArray("values"); - if (aValues.length > 0) { - forcedAwarenessAttributes.put(entry.getKey(), 
aValues); - } - } - } - AwarenessAllocationDecider.this.forcedAwarenessAttributes = forcedAwarenessAttributes; - } - } + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER); private String[] awarenessAttributes; - private Map forcedAwarenessAttributes; + private volatile Map forcedAwarenessAttributes; /** * Creates a new {@link AwarenessAllocationDecider} instance @@ -121,24 +97,28 @@ public class AwarenessAllocationDecider extends AllocationDecider { * @param settings {@link Settings} to use */ public AwarenessAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public AwarenessAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public AwarenessAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.awarenessAttributes = settings.getAsArray(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES); + this.awarenessAttributes = CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes); + setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes); + } - forcedAwarenessAttributes = new HashMap<>(); - Map forceGroups = settings.getGroups(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP); + private void setForcedAwarenessAttributes(Settings forceSettings) { + Map forcedAwarenessAttributes = new HashMap<>(); + Map forceGroups = forceSettings.getAsGroups(); for (Map.Entry entry : forceGroups.entrySet()) { String[] aValues = entry.getValue().getAsArray("values"); if (aValues.length > 0) { forcedAwarenessAttributes.put(entry.getKey(), aValues); } } - - nodeSettingsService.addListener(new ApplySettings()); + this.forcedAwarenessAttributes = forcedAwarenessAttributes; } /** @@ -150,6 +130,10 @@ public class AwarenessAllocationDecider extends AllocationDecider { return this.awarenessAttributes; } + private void setAwarenessAttributes(String[] awarenessAttributes) { + this.awarenessAttributes = awarenessAttributes; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return underCapacity(shardRouting, node, allocation, true); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 7638c7aeee8..b1be2a6fce4 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -19,13 +19,12 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.cluster.ClusterState; import 
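The decider rewrites above and below all follow one shape: read the initial value from Settings in the constructor, register a consumer for dynamic updates, and store changes into a volatile field. A minimal sketch of that observer wiring; SettingsRegistry here is a stand-in for ClusterSettings#addSettingsUpdateConsumer, not the real API:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

final class SettingsRegistry {
    private final List<Consumer<String>> consumers = new ArrayList<>();

    void addSettingsUpdateConsumer(Consumer<String> consumer) {
        consumers.add(consumer);
    }

    // invoked when a cluster-settings update is applied
    void publish(String newValue) {
        consumers.forEach(c -> c.accept(newValue));
    }
}

final class AwarenessLikeDecider {
    // volatile: written by the settings-update thread, read by allocation threads
    private volatile String[] awarenessAttributes;

    AwarenessLikeDecider(String initial, SettingsRegistry registry) {
        setAwarenessAttributes(initial.split(","));   // initial value from node settings
        registry.addSettingsUpdateConsumer(s -> setAwarenessAttributes(s.split(",")));
    }

    private void setAwarenessAttributes(String[] attributes) {
        this.awarenessAttributes = attributes;
    }

    public static void main(String[] args) {
        SettingsRegistry registry = new SettingsRegistry();
        AwarenessLikeDecider decider = new AwarenessLikeDecider("zone", registry);
        registry.publish("zone,rack");                           // dynamic update, no listener class needed
        System.out.println(decider.awarenessAttributes.length);  // 2
    }
}
```

Compared with the removed ApplySettings inner classes, a method reference per setting keeps each field's update path in one place and makes the diff mostly mechanical.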
org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.Locale; @@ -38,10 +37,10 @@ import java.util.Locale; *

 * <ul>
 * <li>indices_primaries_active - Re-balancing is allowed only once all
 * primary shards on all indices are active.</li>
- *
+ *
 * <li>indices_all_active - Re-balancing is allowed only once all
 * shards on all indices are active.</li>
- *
+ *
 * <li>always - Re-balancing is allowed once a shard replication group
 * is active</li>
 * </ul>
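The hunk that follows collapses the allow_rebalance string constant and its standalone Validator into a single Setting whose parser rejects bad values up front. A compact sketch of that parse-at-the-edge validation (simplified: the real ClusterRebalanceType.parseString also accepts camel-case spellings such as "indicesPrimariesActive"):

```java
import java.util.Locale;

public class EnumSettingDemo {
    enum ClusterRebalanceType {
        ALWAYS, INDICES_PRIMARIES_ACTIVE, INDICES_ALL_ACTIVE;

        static ClusterRebalanceType parseString(String typeString) {
            switch (typeString.toLowerCase(Locale.ROOT)) {
                case "always": return ALWAYS;
                case "indices_primaries_active": return INDICES_PRIMARIES_ACTIVE;
                case "indices_all_active": return INDICES_ALL_ACTIVE;
                default: throw new IllegalArgumentException("Illegal value: " + typeString);
            }
        }
    }

    public static void main(String[] args) {
        // the Setting carries key, default and parser together, so every read or
        // dynamic update goes through parseString and bad values fail fast
        System.out.println(ClusterRebalanceType.parseString("indices_all_active"));
        try {
            ClusterRebalanceType.parseString("bogus");
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```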
@@ -49,19 +48,10 @@ import java.util.Locale; public class ClusterRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "cluster_rebalance"; - - public static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance"; - public static final Validator ALLOCATION_ALLOW_REBALANCE_VALIDATOR = (setting, value, clusterState) -> { - try { - ClusterRebalanceType.parseString(value); - return null; - } catch (IllegalArgumentException e) { - return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]"; - } - }; + public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER); /** - * An enum representation for the configured re-balance type. + * An enum representation for the configured re-balance type. */ public static enum ClusterRebalanceType { /** @@ -73,7 +63,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { */ INDICES_PRIMARIES_ACTIVE, /** - * Re-balancing is allowed only once all shards on all indices are active. + * Re-balancing is allowed only once all shards on all indices are active. */ INDICES_ALL_ACTIVE; @@ -85,48 +75,28 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) { return ClusterRebalanceType.INDICES_ALL_ACTIVE; } - throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); + throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING + ": " + typeString); } } - private ClusterRebalanceType type; + private volatile ClusterRebalanceType type; @Inject - public ClusterRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ClusterRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active"); try { - type = ClusterRebalanceType.parseString(allowRebalance); + type = CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.get(settings); } catch (IllegalStateException e) { - logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance); + logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getRaw(settings)); type = ClusterRebalanceType.INDICES_ALL_ACTIVE; } - logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, type.toString().toLowerCase(Locale.ROOT)); + logger.debug("using [{}] with [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, type.toString().toLowerCase(Locale.ROOT)); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, this::setType); } - class ApplySettings implements NodeSettingsService.Listener { - - @Override - public void onRefreshSettings(Settings settings) { - String newAllowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, null); - 
if (newAllowRebalance != null) { - ClusterRebalanceType newType = null; - try { - newType = ClusterRebalanceType.parseString(newAllowRebalance); - } catch (IllegalArgumentException e) { - // ignore - } - - if (newType != null && newType != ClusterRebalanceAllocationDecider.this.type) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, - ClusterRebalanceAllocationDecider.this.type.toString().toLowerCase(Locale.ROOT), - newType.toString().toLowerCase(Locale.ROOT)); - ClusterRebalanceAllocationDecider.this.type = newType; - } - } - } + private void setType(ClusterRebalanceType type) { + this.type = type; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index 6bd1b437acf..a9ad35fd526 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -22,8 +22,9 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * Similar to the {@link ClusterRebalanceAllocationDecider} this @@ -41,27 +42,19 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { public static final String NAME = "concurrent_rebalance"; - public static final String CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE = "cluster.routing.allocation.cluster_concurrent_rebalance"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance); - if (clusterConcurrentRebalance != ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance) { - logger.info("updating [cluster.routing.allocation.cluster_concurrent_rebalance] from [{}], to [{}]", ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance, clusterConcurrentRebalance); - ConcurrentRebalanceAllocationDecider.this.clusterConcurrentRebalance = clusterConcurrentRebalance; - } - } - } - + public static final Setting CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER); private volatile int clusterConcurrentRebalance; @Inject - public ConcurrentRebalanceAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ConcurrentRebalanceAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.clusterConcurrentRebalance = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 2); + this.clusterConcurrentRebalance = CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.get(settings); logger.debug("using [cluster_concurrent_rebalance] with [{}]", 
clusterConcurrentRebalance); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, this::setClusterConcurrentRebalance); + } + + private void setClusterConcurrentRebalance(int concurrentRebalance) { + clusterConcurrentRebalance = concurrentRebalance; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index a02c72c5745..68fd7f3db94 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -22,26 +22,27 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.RatioValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.node.settings.NodeSettingsService; import java.util.Set; @@ -80,53 +81,11 @@ public class DiskThresholdDecider extends AllocationDecider { private volatile boolean enabled; private volatile TimeValue rerouteInterval; - public static final String CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED = "cluster.routing.allocation.disk.threshold_enabled"; - public static final String CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.low"; - public static final String CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK = "cluster.routing.allocation.disk.watermark.high"; - public static final String CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS = "cluster.routing.allocation.disk.include_relocations"; - public static final String CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL = "cluster.routing.allocation.disk.reroute_interval"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String newLowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, null); - String newHighWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, null); - Boolean newRelocationsSetting = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, null); - Boolean newEnableSetting = 
settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, null);
-            TimeValue newRerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, null);
-
-            if (newEnableSetting != null) {
-                logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED,
-                        DiskThresholdDecider.this.enabled, newEnableSetting);
-                DiskThresholdDecider.this.enabled = newEnableSetting;
-            }
-            if (newRelocationsSetting != null) {
-                logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS,
-                        DiskThresholdDecider.this.includeRelocations, newRelocationsSetting);
-                DiskThresholdDecider.this.includeRelocations = newRelocationsSetting;
-            }
-            if (newLowWatermark != null) {
-                if (!validWatermarkSetting(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
-                    throw new ElasticsearchParseException("unable to parse low watermark [{}]", newLowWatermark);
-                }
-                logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, newLowWatermark);
-                DiskThresholdDecider.this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(newLowWatermark);
-                DiskThresholdDecider.this.freeBytesThresholdLow = thresholdBytesFromWatermark(newLowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
-            }
-            if (newHighWatermark != null) {
-                if (!validWatermarkSetting(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
-                    throw new ElasticsearchParseException("unable to parse high watermark [{}]", newHighWatermark);
-                }
-                logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, newHighWatermark);
-                DiskThresholdDecider.this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(newHighWatermark);
-                DiskThresholdDecider.this.freeBytesThresholdHigh = thresholdBytesFromWatermark(newHighWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
-            }
-            if (newRerouteInterval != null) {
-                logger.info("updating [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, newRerouteInterval);
-                DiskThresholdDecider.this.rerouteInterval = newRerouteInterval;
-            }
-        }
-    }
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
+    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
+    public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
+    public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);

     /**
      * Listens for a node to go over the high watermark and kicks off an empty
      *
@@ -231,38 +190,49 @@ public class DiskThresholdDecider extends AllocationDecider {
         // It's okay the Client is null here, because the empty cluster info
         // service will never actually call the listener where the client is
         // needed. Also this constructor is only used for tests
-        this(settings, new NodeSettingsService(settings), EmptyClusterInfoService.INSTANCE, null);
+        this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), EmptyClusterInfoService.INSTANCE, null);
     }

     @Inject
-    public DiskThresholdDecider(Settings settings, NodeSettingsService nodeSettingsService, ClusterInfoService infoService, Client client) {
+    public DiskThresholdDecider(Settings settings, ClusterSettings clusterSettings, ClusterInfoService infoService, Client client) {
         super(settings);
-        String lowWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "85%");
-        String highWatermark = settings.get(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "90%");
-
-        if (!validWatermarkSetting(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK)) {
-            throw new ElasticsearchParseException("unable to parse low watermark [{}]", lowWatermark);
-        }
-        if (!validWatermarkSetting(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK)) {
-            throw new ElasticsearchParseException("unable to parse high watermark [{}]", highWatermark);
-        }
-        // Watermark is expressed in terms of used data, but we need "free" data watermark
-        this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
-        this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
-
-        this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK);
-        this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK);
-        this.includeRelocations = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true);
-        this.rerouteInterval = settings.getAsTime(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, TimeValue.timeValueSeconds(60));
-
-        this.enabled = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true);
-        nodeSettingsService.addListener(new ApplySettings());
+        final String lowWatermark = CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.get(settings);
+        final String highWatermark = CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.get(settings);
+        setHighWatermark(highWatermark);
+        setLowWatermark(lowWatermark);
+        this.includeRelocations = CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.get(settings);
+        this.rerouteInterval = CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.get(settings);
+        this.enabled = CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, this::setLowWatermark);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, this::setHighWatermark);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, this::setIncludeRelocations);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, this::setRerouteInterval);
+        clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled);
         infoService.addListener(new DiskListener(client));
     }

-    // For Testing
-    ApplySettings newApplySettings() {
-        return new ApplySettings();
+    private void setIncludeRelocations(boolean includeRelocations) {
+        this.includeRelocations = includeRelocations;
+    }
+
+    private void setRerouteInterval(TimeValue rerouteInterval) {
+        this.rerouteInterval = rerouteInterval;
+    }
+
+    private void setEnabled(boolean enabled) {
+        this.enabled = enabled;
+    }
+
+    private void setLowWatermark(String lowWatermark) {
+        // Watermark is expressed in terms of used data, but we need "free" data watermark
+        this.freeDiskThresholdLow = 100.0 - thresholdPercentageFromWatermark(lowWatermark);
+        this.freeBytesThresholdLow = thresholdBytesFromWatermark(lowWatermark, CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey());
+    }
+
+    private void setHighWatermark(String highWatermark) {
+        // Watermark is expressed in terms of used data, but we need "free" data watermark
+        this.freeDiskThresholdHigh = 100.0 - thresholdPercentageFromWatermark(highWatermark);
+        this.freeBytesThresholdHigh = thresholdBytesFromWatermark(highWatermark, CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey());
     }

     // For Testing
@@ -360,7 +330,8 @@ public class DiskThresholdDecider extends AllocationDecider {
         }
         // a flag for whether the primary shard has been previously allocated
-        boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate();
+        IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex());
+        boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
         // checks for exact byte comparisons
         if (freeBytes < freeBytesThresholdLow.bytes()) {
@@ -580,20 +551,21 @@
     /**
      * Checks if a watermark string is a valid percentage or byte size value,
-     * returning true if valid, false if invalid.
+     * @return the watermark value given
      */
-    public boolean validWatermarkSetting(String watermark, String settingName) {
+    public static String validWatermarkSetting(String watermark, String settingName) {
         try {
             RatioValue.parseRatioValue(watermark);
-            return true;
         } catch (ElasticsearchParseException e) {
             try {
                 ByteSizeValue.parseBytesSizeValue(watermark, settingName);
-                return true;
             } catch (ElasticsearchParseException ex) {
-                return false;
+                ex.addSuppressed(e);
+                throw ex;
             }
         }
+        return watermark;
+    }

     private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap<String, DiskUsage> usages) {
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
index 0bbd4935044..a31d36db349 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java
@@ -19,18 +19,20 @@ package org.elasticsearch.cluster.routing.allocation.decider;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;

 import java.util.Locale;

 /**
- * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE} /
- * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE}
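The validWatermarkSetting change above inverts the old boolean contract: the validator now returns the raw value on success and throws on failure, carrying the first parse error along as a suppressed exception. A self-contained sketch of that contract; parseRatio and parseBytes are simplified stand-ins for RatioValue.parseRatioValue and ByteSizeValue.parseBytesSizeValue:

```java
public class WatermarkValidatorDemo {
    static double parseRatio(String value) {
        if (value.endsWith("%") == false) {
            throw new IllegalArgumentException("not a percentage: " + value);
        }
        return Double.parseDouble(value.substring(0, value.length() - 1));
    }

    static long parseBytes(String value) {
        if (value.endsWith("gb") == false) {
            throw new IllegalArgumentException("not a byte size: " + value);
        }
        return Long.parseLong(value.substring(0, value.length() - 2)) * (1L << 30);
    }

    static String validWatermarkSetting(String watermark) {
        try {
            parseRatio(watermark);           // first try "85%"-style ratios
        } catch (IllegalArgumentException e) {
            try {
                parseBytes(watermark);       // then "500gb"-style absolute sizes
            } catch (IllegalArgumentException ex) {
                ex.addSuppressed(e);         // keep the ratio failure for diagnostics
                throw ex;
            }
        }
        return watermark;                    // valid: the Setting stores the raw string
    }

    public static void main(String[] args) {
        System.out.println(validWatermarkSetting("85%"));
        System.out.println(validWatermarkSetting("500gb"));
        try {
            validWatermarkSetting("oops");
        } catch (IllegalArgumentException ex) {
            System.out.println(ex.getMessage() + "; suppressed: " + ex.getSuppressed()[0].getMessage());
        }
    }
}
```

Returning the value lets the method double as the Setting's parser, so invalid watermarks are rejected when the setting is read or updated rather than silently tolerated.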
and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. + * This allocation decider allows shard allocations / rebalancing via the cluster wide settings {@link #CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / + * {@link #CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} and the per index setting {@link #INDEX_ROUTING_ALLOCATION_ENABLE} / {@link #INDEX_ROUTING_REBALANCE_ENABLE}. * The per index settings overrides the cluster wide setting. * *

@@ -54,26 +56,34 @@ import java.util.Locale; * @see Rebalance * @see Allocation */ -public class EnableAllocationDecider extends AllocationDecider implements NodeSettingsService.Listener { +public class EnableAllocationDecider extends AllocationDecider { public static final String NAME = "enable"; - public static final String CLUSTER_ROUTING_ALLOCATION_ENABLE = "cluster.routing.allocation.enable"; - public static final String INDEX_ROUTING_ALLOCATION_ENABLE = "index.routing.allocation.enable"; + public static final Setting CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER); + public static final String INDEX_ROUTING_ALLOCATION_ENABLE= "index.routing.allocation.enable"; - public static final String CLUSTER_ROUTING_REBALANCE_ENABLE = "cluster.routing.rebalance.enable"; + public static final Setting CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER); public static final String INDEX_ROUTING_REBALANCE_ENABLE = "index.routing.rebalance.enable"; private volatile Rebalance enableRebalance; private volatile Allocation enableAllocation; - @Inject - public EnableAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public EnableAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.enableAllocation = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.ALL.name())); - this.enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, Rebalance.ALL.name())); - nodeSettingsService.addListener(this); + this.enableAllocation = CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get(settings); + this.enableRebalance = CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, this::setEnableAllocation); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, this::setEnableRebalance); + } + + public void setEnableRebalance(Rebalance enableRebalance) { + this.enableRebalance = enableRebalance; + } + + public void setEnableAllocation(Allocation enableAllocation) { + this.enableAllocation = enableAllocation; } @Override @@ -82,8 +92,8 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored"); } - Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings(); - String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE); + IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndex()); + String enableIndexValue = indexMetaData.getSettings().get(INDEX_ROUTING_ALLOCATION_ENABLE); final Allocation enable; if (enableIndexValue != null) { enable = Allocation.parse(enableIndexValue); @@ -96,7 +106,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe case NONE: return allocation.decision(Decision.NO, NAME, "no allocations are allowed"); case NEW_PRIMARIES: - if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate() == false) { + if (shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData) == false) { return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed"); } else { return allocation.decision(Decision.NO, 
NAME, "non-new primary allocations are forbidden"); @@ -148,25 +158,9 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe } } - @Override - public void onRefreshSettings(Settings settings) { - final Allocation enable = Allocation.parse(settings.get(CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation.name())); - if (enable != this.enableAllocation) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_ALLOCATION_ENABLE, this.enableAllocation, enable); - EnableAllocationDecider.this.enableAllocation = enable; - } - - final Rebalance enableRebalance = Rebalance.parse(settings.get(CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance.name())); - if (enableRebalance != this.enableRebalance) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_ROUTING_REBALANCE_ENABLE, this.enableRebalance, enableRebalance); - EnableAllocationDecider.this.enableRebalance = enableRebalance; - } - - } - /** * Allocation values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_ALLOCATION_ENABLE} * via cluster / index settings. */ public enum Allocation { @@ -192,7 +186,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe /** * Rebalance values or rather their string representation to be used used with - * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} + * {@link EnableAllocationDecider#CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING} / {@link EnableAllocationDecider#INDEX_ROUTING_REBALANCE_ENABLE} * via cluster / index settings. 
*/ public enum Rebalance { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java index e0e2caaf04a..4c451e7fffa 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/FilterAllocationDecider.java @@ -25,10 +25,9 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; - -import java.util.Map; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND; import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.OR; @@ -65,36 +64,23 @@ public class FilterAllocationDecider extends AllocationDecider { public static final String INDEX_ROUTING_INCLUDE_GROUP = "index.routing.allocation.include."; public static final String INDEX_ROUTING_EXCLUDE_GROUP = "index.routing.allocation.exclude."; - public static final String CLUSTER_ROUTING_REQUIRE_GROUP = "cluster.routing.allocation.require."; - public static final String CLUSTER_ROUTING_INCLUDE_GROUP = "cluster.routing.allocation.include."; - public static final String CLUSTER_ROUTING_EXCLUDE_GROUP = "cluster.routing.allocation.exclude."; + public static final Setting CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER); private volatile DiscoveryNodeFilters clusterRequireFilters; private volatile DiscoveryNodeFilters clusterIncludeFilters; private volatile DiscoveryNodeFilters clusterExcludeFilters; @Inject - public FilterAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public FilterAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - Map requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (requireMap.isEmpty()) { - clusterRequireFilters = null; - } else { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (includeMap.isEmpty()) { - clusterIncludeFilters = null; - } else { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (excludeMap.isEmpty()) { - clusterExcludeFilters = null; - } else { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - nodeSettingsService.addListener(new ApplySettings()); + setClusterRequireFilters(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING.get(settings)); + setClusterExcludeFilters(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.get(settings)); + 
setClusterIncludeFilters(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, this::setClusterRequireFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, this::setClusterExcludeFilters); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, this::setClusterIncludeFilters); } @Override @@ -144,21 +130,13 @@ public class FilterAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters"); } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Map requireMap = settings.getByPrefix(CLUSTER_ROUTING_REQUIRE_GROUP).getAsMap(); - if (!requireMap.isEmpty()) { - clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, requireMap); - } - Map includeMap = settings.getByPrefix(CLUSTER_ROUTING_INCLUDE_GROUP).getAsMap(); - if (!includeMap.isEmpty()) { - clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, includeMap); - } - Map excludeMap = settings.getByPrefix(CLUSTER_ROUTING_EXCLUDE_GROUP).getAsMap(); - if (!excludeMap.isEmpty()) { - clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, excludeMap); - } - } + private void setClusterRequireFilters(Settings settings) { + clusterRequireFilters = DiscoveryNodeFilters.buildFromKeyValue(AND, settings.getAsMap()); + } + private void setClusterIncludeFilters(Settings settings) { + clusterIncludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap()); + } + private void setClusterExcludeFilters(Settings settings) { + clusterExcludeFilters = DiscoveryNodeFilters.buildFromKeyValue(OR, settings.getAsMap()); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java index 3d68ed50d27..9149d04cf60 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ShardsLimitAllocationDecider.java @@ -24,16 +24,16 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * This {@link AllocationDecider} limits the number of shards per node on a per * index or node-wide basis. The allocator prevents a single node to hold more * than {@value #INDEX_TOTAL_SHARDS_PER_NODE} per index and - * {@value #CLUSTER_TOTAL_SHARDS_PER_NODE} globally during the allocation + * cluster.routing.allocation.total_shards_per_node globally during the allocation * process. The limits of this decider can be changed in real-time via a the * index settings API. *
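The FilterAllocationDecider hunk above swaps the prefix-map listener for three group-setting consumers. One behavioral nuance is visible in the diff: the old listener skipped empty maps, while the new consumers rebuild the filters unconditionally, so clearing the group now clears the filters. A simplified sketch; byPrefix and the plain maps stand in for Settings#getByPrefix/getAsMap and DiscoveryNodeFilters.buildFromKeyValue:

```java
import java.util.HashMap;
import java.util.Map;

public class GroupSettingDemo {
    private volatile Map<String, String> clusterRequireFilters = Map.of();

    // called at construction and on every dynamic update; an empty group resets
    // the filters instead of being ignored as the old listener did
    private void setClusterRequireFilters(Map<String, String> group) {
        clusterRequireFilters = Map.copyOf(group);
    }

    // collect the settings under a prefix, e.g. cluster.routing.allocation.require.*
    static Map<String, String> byPrefix(Map<String, String> settings, String prefix) {
        Map<String, String> group = new HashMap<>();
        settings.forEach((key, value) -> {
            if (key.startsWith(prefix)) {
                group.put(key.substring(prefix.length()), value);
            }
        });
        return group;
    }

    public static void main(String[] args) {
        GroupSettingDemo decider = new GroupSettingDemo();
        Map<String, String> settings = Map.of("cluster.routing.allocation.require.zone", "us-east");
        decider.setClusterRequireFilters(byPrefix(settings, "cluster.routing.allocation.require."));
        System.out.println(decider.clusterRequireFilters);   // {zone=us-east}
    }
}
```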

@@ -64,26 +64,18 @@ public class ShardsLimitAllocationDecider extends AllocationDecider { * Controls the maximum number of shards per node on a global level. * Negative values are interpreted as unlimited. */ - public static final String CLUSTER_TOTAL_SHARDS_PER_NODE = "cluster.routing.allocation.total_shards_per_node"; + public static final Setting CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, true, Setting.Scope.CLUSTER); - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - Integer newClusterLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, null); - - if (newClusterLimit != null) { - logger.info("updating [{}] from [{}] to [{}]", CLUSTER_TOTAL_SHARDS_PER_NODE, - ShardsLimitAllocationDecider.this.clusterShardLimit, newClusterLimit); - ShardsLimitAllocationDecider.this.clusterShardLimit = newClusterLimit; - } - } - } @Inject - public ShardsLimitAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ShardsLimitAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.clusterShardLimit = settings.getAsInt(CLUSTER_TOTAL_SHARDS_PER_NODE, -1); - nodeSettingsService.addListener(new ApplySettings()); + this.clusterShardLimit = CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, this::setClusterShardLimit); + } + + private void setClusterShardLimit(int clusterShardLimit) { + this.clusterShardLimit = clusterShardLimit; } @Override diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 37b9f9f461b..597f0add8da 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -23,9 +23,10 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * This {@link org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider} prevents shards that @@ -38,18 +39,7 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { /** * Disables relocation of shards that are currently being snapshotted. 
*/ - public static final String CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED = "cluster.routing.allocation.snapshot.relocation_enabled"; - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - boolean newEnableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - if (newEnableRelocation != enableRelocation) { - logger.info("updating [{}] from [{}], to [{}]", CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation, newEnableRelocation); - enableRelocation = newEnableRelocation; - } - } - } + public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER); private volatile boolean enableRelocation = false; @@ -66,14 +56,18 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider { * @param settings {@link org.elasticsearch.common.settings.Settings} to use */ public SnapshotInProgressAllocationDecider(Settings settings) { - this(settings, new NodeSettingsService(settings)); + this(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); } @Inject - public SnapshotInProgressAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public SnapshotInProgressAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - enableRelocation = settings.getAsBoolean(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED, enableRelocation); - nodeSettingsService.addListener(new ApplySettings()); + enableRelocation = CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, this::setEnableRelocation); + } + + private void setEnableRelocation(boolean enableRelocation) { + this.enableRelocation = enableRelocation; } /** diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index ed6814d83af..b97e6138674 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -19,13 +19,14 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; /** * {@link ThrottlingAllocationDecider} controls the recovery process per node in @@ -47,27 +48,33 @@ import org.elasticsearch.node.settings.NodeSettingsService; */ public class ThrottlingAllocationDecider extends AllocationDecider { - public static final String NAME = "throttling"; - - public static final String
CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = "cluster.routing.allocation.node_initial_primaries_recoveries"; - public static final String CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = "cluster.routing.allocation.node_concurrent_recoveries"; - public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; - public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; + public static final String NAME = "throttling"; + public static final String CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES = "cluster.routing.allocation.concurrent_recoveries"; + + public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER); + public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", (s) -> s.get(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER); private volatile int primariesInitialRecoveries; private volatile int concurrentRecoveries; @Inject - public ThrottlingAllocationDecider(Settings settings, NodeSettingsService nodeSettingsService) { + public ThrottlingAllocationDecider(Settings settings, ClusterSettings clusterSettings) { super(settings); - - this.primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES); - this.concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES)); + this.primariesInitialRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.get(settings); + this.concurrentRecoveries = CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.get(settings); logger.debug("using node_concurrent_recoveries [{}], node_initial_primaries_recoveries [{}]", concurrentRecoveries, primariesInitialRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, this::setPrimariesInitialRecoveries); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, this::setConcurrentRecoveries); + } - nodeSettingsService.addListener(new ApplySettings()); + private void setConcurrentRecoveries(int concurrentRecoveries) { + this.concurrentRecoveries = concurrentRecoveries; + } + + private void setPrimariesInitialRecoveries(int primariesInitialRecoveries) { + this.primariesInitialRecoveries = primariesInitialRecoveries; } @Override @@ -115,21 +122,4 @@ public class ThrottlingAllocationDecider extends AllocationDecider { return allocation.decision(Decision.YES, NAME, "below shard recovery limit of [%d]", concurrentRecoveries); } } - - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings
settings) { - int primariesInitialRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, ThrottlingAllocationDecider.this.primariesInitialRecoveries); - if (primariesInitialRecoveries != ThrottlingAllocationDecider.this.primariesInitialRecoveries) { - logger.info("updating [cluster.routing.allocation.node_initial_primaries_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.primariesInitialRecoveries, primariesInitialRecoveries); - ThrottlingAllocationDecider.this.primariesInitialRecoveries = primariesInitialRecoveries; - } - - int concurrentRecoveries = settings.getAsInt(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, ThrottlingAllocationDecider.this.concurrentRecoveries); - if (concurrentRecoveries != ThrottlingAllocationDecider.this.concurrentRecoveries) { - logger.info("updating [cluster.routing.allocation.node_concurrent_recoveries] from [{}] to [{}]", ThrottlingAllocationDecider.this.concurrentRecoveries, concurrentRecoveries); - ThrottlingAllocationDecider.this.concurrentRecoveries = concurrentRecoveries; - } - } - } } diff --git a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index d4b15861846..5fc013b6633 100644 --- a/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ b/core/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,8 +20,19 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.Version; -import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.AckedClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Builder; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; +import org.elasticsearch.cluster.ClusterStateTaskListener; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.LocalNodeMasterListener; +import org.elasticsearch.cluster.TimeoutClusterStateListener; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.MetaData; @@ -38,20 +49,39 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.*; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import 
org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.PrioritizedRunnable; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.*; -import java.util.concurrent.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Queue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; @@ -62,8 +92,8 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { - public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; - public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; + public static final Setting CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); + public static final Setting CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER); public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; @@ -74,7 +104,7 @@ public class InternalClusterService extends AbstractLifecycleComponent threadPool.generic().execute(() -> { if (updateTask.processed.getAndSet(true) == false) { + logger.debug("cluster state update task [{}] timed out after [{}]", source, config.timeout()); listener.onFailure(source, new ProcessClusterEventTimeoutException(config.timeout(), source)); }})); } else { @@ -327,7 +357,7 @@ public class InternalClusterService extends AbstractLifecycleComponent updateTask : toExecute) { + assert batchResult.executionResults.containsKey(updateTask.task) : "missing task result for [" + updateTask.task + "]"; + } + } ClusterState newClusterState = batchResult.resultingState; final ArrayList> proccessedListeners = new ArrayList<>(); @@ -421,7 +460,13 @@ public class InternalClusterService extends AbstractLifecycleComponent proccessedListeners.add(updateTask), ex -> updateTask.listener.onFailure(updateTask.source, ex)); + executionResult.handle( + () -> proccessedListeners.add(updateTask), + ex -> { + logger.debug("cluster state update task [{}] failed", ex, updateTask.source); + updateTask.listener.onFailure(updateTask.source, ex); + } + ); } if (previousClusterState == newClusterState) { @@ -521,6 +566,15 @@ public class InternalClusterService extends AbstractLifecycleComponent clazz = Class.forName("com.carrotsearch.randomizedtesting.RandomizedContext"); + maybeCurrentMethod = clazz.getMethod("current"); + maybeGetRandomMethod = clazz.getMethod("getRandom"); + } catch 
(Throwable t) { + maybeCurrentMethod = null; + maybeGetRandomMethod = null; + } + currentMethod = maybeCurrentMethod; + getRandomMethod = maybeGetRandomMethod; + } + + private Randomness() {} + + /** + * Provides a reproducible source of randomness seeded by a long + * seed in the settings with the key setting. + * + * @param settings the settings containing the seed + * @param setting the key to access the seed + * @return a reproducible source of randomness + */ + public static Random get(Settings settings, String setting) { + Long maybeSeed = settings.getAsLong(setting, null); + if (maybeSeed != null) { + return new Random(maybeSeed); + } else { + return get(); + } + } + + /** + * Provides a source of randomness that is reproducible when + * running under the Elasticsearch test suite, and otherwise + * produces a non-reproducible source of randomness. Reproducible + * sources of randomness are created when the system property + * "tests.seed" is set and the security policy allows reading this + * system property. Otherwise, non-reproducible sources of + * randomness are created. + * + * @return a source of randomness + * @throws IllegalStateException if running tests but was not able + * to acquire an instance of Random from + * RandomizedContext or tests are + * running but tests.seed is not set + */ + public static Random get() { + if (currentMethod != null && getRandomMethod != null) { + try { + Object randomizedContext = currentMethod.invoke(null); + return (Random) getRandomMethod.invoke(randomizedContext); + } catch (ReflectiveOperationException e) { + // unexpected, bail + throw new IllegalStateException("running tests but failed to invoke RandomizedContext#getRandom", e); + } + } else { + return getWithoutSeed(); + } + } + + private static Random getWithoutSeed() { + assert currentMethod == null && getRandomMethod == null : "running under tests but tried to create non-reproducible random"; + return ThreadLocalRandom.current(); + } + + public static void shuffle(List list) { + Collections.shuffle(list, get()); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java index 62f29d2bad7..afcf8990513 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/EnvelopeBuilder.java @@ -25,26 +25,17 @@ import com.vividsolutions.jts.geom.Coordinate; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; - import java.io.IOException; -import java.util.Locale; import java.util.Objects; public class EnvelopeBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.ENVELOPE; + public static final EnvelopeBuilder PROTOTYPE = new EnvelopeBuilder(); - protected Coordinate topLeft; - protected Coordinate bottomRight; - - public EnvelopeBuilder() { - this(Orientation.RIGHT); - } - - public EnvelopeBuilder(Orientation orientation) { - super(orientation); - } + private Coordinate topLeft; + private Coordinate bottomRight; public EnvelopeBuilder topLeft(Coordinate topLeft) { this.topLeft = topLeft; @@ -55,6 +46,10 @@ public class EnvelopeBuilder extends ShapeBuilder { return topLeft(coordinate(longitude, latitude)); } + public Coordinate topLeft() { + return this.topLeft; + } + public EnvelopeBuilder bottomRight(Coordinate bottomRight) { 
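The Randomness helper shown above funnels non-test random number needs through one place so that, under the test framework, every Random derives from tests.seed. A hedged usage sketch (the seed key is hypothetical; the org.elasticsearch.common package location matches upstream):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Random;

import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.settings.Settings;

public class RandomnessExample {

    public static void demo(Settings settings) {
        // Reproducible when the (hypothetical) "example.seed" key is present;
        // otherwise this falls through to Randomness.get().
        Random random = Randomness.get(settings, "example.seed");
        int choice = random.nextInt(10);

        // Under the test framework, get() reflects into RandomizedContext and the
        // shuffle order is tied to tests.seed; in production it is ThreadLocalRandom.
        List<String> nodeIds = new ArrayList<>(Arrays.asList("n1", "n2", "n3"));
        Randomness.shuffle(nodeIds);
        System.out.println(choice + " " + nodeIds);
    }
}
```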
this.bottomRight = bottomRight; return this; @@ -64,11 +59,14 @@ public class EnvelopeBuilder extends ShapeBuilder { return bottomRight(coordinate(longitude, latitude)); } + public Coordinate bottomRight() { + return this.bottomRight; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); - builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); toXContent(builder, topLeft); toXContent(builder, bottomRight); @@ -88,7 +86,7 @@ public class EnvelopeBuilder extends ShapeBuilder { @Override public int hashCode() { - return Objects.hash(orientation, topLeft, bottomRight); + return Objects.hash(topLeft, bottomRight); } @Override @@ -100,22 +98,19 @@ public class EnvelopeBuilder extends ShapeBuilder { return false; } EnvelopeBuilder other = (EnvelopeBuilder) obj; - return Objects.equals(orientation, other.orientation) && - Objects.equals(topLeft, other.topLeft) && + return Objects.equals(topLeft, other.topLeft) && Objects.equals(bottomRight, other.bottomRight); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(orientation == Orientation.RIGHT); writeCoordinateTo(topLeft, out); writeCoordinateTo(bottomRight, out); } @Override public EnvelopeBuilder readFrom(StreamInput in) throws IOException { - Orientation orientation = in.readBoolean() ? Orientation.RIGHT : Orientation.LEFT; - return new EnvelopeBuilder(orientation) + return new EnvelopeBuilder() .topLeft(readCoordinateFrom(in)) .bottomRight(readCoordinateFrom(in)); } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java index 45397ed962f..067cd014c0f 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/GeometryCollectionBuilder.java @@ -20,28 +20,26 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Shape; + import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; public class GeometryCollectionBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.GEOMETRYCOLLECTION; + public static final GeometryCollectionBuilder PROTOTYPE = new GeometryCollectionBuilder(); + protected final ArrayList shapes = new ArrayList<>(); - public GeometryCollectionBuilder() { - this(Orientation.RIGHT); - } - - public GeometryCollectionBuilder(Orientation orientation) { - super(orientation); - } - public GeometryCollectionBuilder shape(ShapeBuilder shape) { this.shapes.add(shape); return this; @@ -132,4 +130,39 @@ public class GeometryCollectionBuilder extends ShapeBuilder { //note: ShapeCollection is probably faster than a Multi* geom. 
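The shape builders in these hunks each gain a symmetric writeTo/readFrom pair plus a stateless PROTOTYPE used as the deserialization entry point. A round-trip sketch for the envelope case (BytesStreamOutput and StreamInput.wrap are assumed from the stream test utilities of this era):

```java
import com.vividsolutions.jts.geom.Coordinate;

import org.elasticsearch.common.geo.builders.EnvelopeBuilder;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

public class EnvelopeRoundTrip {

    public static void main(String[] args) throws Exception {
        EnvelopeBuilder original = new EnvelopeBuilder()
                .topLeft(new Coordinate(-10, 10))
                .bottomRight(new Coordinate(10, -10));

        // writeTo serializes exactly the state that equals/hashCode compare:
        // the two corner coordinates, and no longer any orientation flag.
        BytesStreamOutput out = new BytesStreamOutput();
        original.writeTo(out);

        StreamInput in = StreamInput.wrap(out.bytes()); // assumed test-style helper
        EnvelopeBuilder copy = EnvelopeBuilder.PROTOTYPE.readFrom(in);

        assert copy.equals(original) && copy.hashCode() == original.hashCode();
    }
}
```

Because orientation was dropped from EnvelopeBuilder, only the two corners cross the wire, which is what makes round-trip assertions like the one above meaningful.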
} + @Override + public int hashCode() { + return Objects.hash(shapes); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GeometryCollectionBuilder other = (GeometryCollectionBuilder) obj; + return Objects.equals(shapes, other.shapes); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(shapes.size()); + for (ShapeBuilder shape : shapes) { + out.writeShape(shape); + } + } + + @Override + public GeometryCollectionBuilder readFrom(StreamInput in) throws IOException { + GeometryCollectionBuilder geometryCollectionBuilder = new GeometryCollectionBuilder(); + int shapes = in.readVInt(); + for (int i = 0; i < shapes; i++) { + geometryCollectionBuilder.shape(in.readShape()); + } + return geometryCollectionBuilder; + } + } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java index c7ba9b72f55..464d72c8d8c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/LineStringBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Objects; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; + import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; @@ -34,6 +38,8 @@ public class LineStringBuilder extends PointCollection { public static final GeoShapeType TYPE = GeoShapeType.LINESTRING; + public static final LineStringBuilder PROTOTYPE = new LineStringBuilder(); + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -139,4 +145,39 @@ public class LineStringBuilder extends PointCollection { } return coordinates; } + + @Override + public int hashCode() { + return Objects.hash(points); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + LineStringBuilder other = (LineStringBuilder) obj; + return Objects.equals(points, other.points); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + writeCoordinateTo(point, out); + } + } + + @Override + public LineStringBuilder readFrom(StreamInput in) throws IOException { + LineStringBuilder lineStringBuilder = new LineStringBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + lineStringBuilder.point(readCoordinateFrom(in)); + } + return lineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java index a004b90a2dc..4703ac19b08 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilder.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.geo.builders; +import 
org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -29,11 +31,14 @@ import com.vividsolutions.jts.geom.LineString; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; +import java.util.Objects; public class MultiLineStringBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTILINESTRING; + public static final MultiLineStringBuilder PROTOTYPE = new MultiLineStringBuilder(); + private final ArrayList lines = new ArrayList<>(); public MultiLineStringBuilder linestring(LineStringBuilder line) { @@ -41,6 +46,10 @@ public class MultiLineStringBuilder extends ShapeBuilder { return this; } + public MultiLineStringBuilder linestring(Coordinate[] coordinates) { + return this.linestring(new LineStringBuilder().points(coordinates)); + } + public Coordinate[][] coordinates() { Coordinate[][] result = new Coordinate[lines.size()][]; for (int i = 0; i < result.length; i++) { @@ -92,4 +101,39 @@ public class MultiLineStringBuilder extends ShapeBuilder { } return jtsGeometry(geometry); } + + @Override + public int hashCode() { + return Objects.hash(lines); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiLineStringBuilder other = (MultiLineStringBuilder) obj; + return Objects.equals(lines, other.lines); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(lines.size()); + for (LineStringBuilder line : lines) { + line.writeTo(out); + } + } + + @Override + public MultiLineStringBuilder readFrom(StreamInput in) throws IOException { + MultiLineStringBuilder multiLineStringBuilder = new MultiLineStringBuilder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + multiLineStringBuilder.linestring(LineStringBuilder.PROTOTYPE.readFrom(in)); + } + return multiLineStringBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java index 8d5cfabdabb..a4d236e3557 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPointBuilder.java @@ -22,18 +22,22 @@ package org.elasticsearch.common.geo.builders; import com.spatial4j.core.shape.Point; import com.spatial4j.core.shape.Shape; import com.vividsolutions.jts.geom.Coordinate; -import org.elasticsearch.common.geo.XShapeCollection; -import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Objects; public class MultiPointBuilder extends PointCollection { - public static final GeoShapeType TYPE = GeoShapeType.MULTIPOINT; + public final static MultiPointBuilder PROTOTYPE = new MultiPointBuilder(); + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -52,7 +56,7 @@ public class MultiPointBuilder extends PointCollection { for (Coordinate coord : points) { 
shapes.add(SPATIAL_CONTEXT.makePoint(coord.x, coord.y)); } - XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); + XShapeCollection multiPoints = new XShapeCollection<>(shapes, SPATIAL_CONTEXT); multiPoints.setPointsOnly(true); return multiPoints; } @@ -61,4 +65,39 @@ public class MultiPointBuilder extends PointCollection { public GeoShapeType type() { return TYPE; } + + @Override + public int hashCode() { + return Objects.hash(points); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPointBuilder other = (MultiPointBuilder) obj; + return Objects.equals(points, other.points); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(points.size()); + for (Coordinate point : points) { + writeCoordinateTo(point, out); + } + } + + @Override + public MultiPointBuilder readFrom(StreamInput in) throws IOException { + MultiPointBuilder multiPointBuilder = new MultiPointBuilder(); + int size = in.readVInt(); + for (int i=0; i < size; i++) { + multiPointBuilder.point(readCoordinateFrom(in)); + } + return multiPointBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java index e7762e51b61..2f9d595c9cb 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilder.java @@ -22,8 +22,12 @@ package org.elasticsearch.common.geo.builders; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Locale; +import java.util.Objects; import org.elasticsearch.common.geo.XShapeCollection; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import com.spatial4j.core.shape.Shape; @@ -32,26 +36,50 @@ import com.vividsolutions.jts.geom.Coordinate; public class MultiPolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.MULTIPOLYGON; + public static final MultiPolygonBuilder PROTOTYPE = new MultiPolygonBuilder(); - protected final ArrayList polygons = new ArrayList<>(); + private final ArrayList polygons = new ArrayList<>(); + + private Orientation orientation = Orientation.RIGHT; public MultiPolygonBuilder() { this(Orientation.RIGHT); } public MultiPolygonBuilder(Orientation orientation) { - super(orientation); + this.orientation = orientation; } + public Orientation orientation() { + return this.orientation; + } + + /** + * Add a shallow copy of the polygon to the multipolygon. This will apply the orientation of the + * {@link MultiPolygonBuilder} to the polygon if polygon has different orientation. 
+ */ public MultiPolygonBuilder polygon(PolygonBuilder polygon) { - this.polygons.add(polygon); + PolygonBuilder pb = new PolygonBuilder(this.orientation); + pb.points(polygon.shell().coordinates(false)); + for (LineStringBuilder hole : polygon.holes()) { + pb.hole(hole); + } + this.polygons.add(pb); return this; } + /** + * get the list of polygons + */ + public ArrayList polygons() { + return polygons; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); for(PolygonBuilder polygon : polygons) { builder.startArray(); @@ -89,4 +117,41 @@ public class MultiPolygonBuilder extends ShapeBuilder { return new XShapeCollection<>(shapes, SPATIAL_CONTEXT); //note: ShapeCollection is probably faster than a Multi* geom. } + + @Override + public int hashCode() { + return Objects.hash(polygons, orientation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + MultiPolygonBuilder other = (MultiPolygonBuilder) obj; + return Objects.equals(polygons, other.polygons) && + Objects.equals(orientation, other.orientation); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + out.writeVInt(polygons.size()); + for (PolygonBuilder polygon : polygons) { + polygon.writeTo(out); + } + } + + @Override + public MultiPolygonBuilder readFrom(StreamInput in) throws IOException { + MultiPolygonBuilder polyBuilder = new MultiPolygonBuilder(Orientation.readFrom(in)); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polyBuilder.polygon(PolygonBuilder.PROTOTYPE.readFrom(in)); + } + return polyBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java index d6d62c28b8c..35225461658 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PointBuilder.java @@ -32,7 +32,6 @@ import com.vividsolutions.jts.geom.Coordinate; public class PointBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POINT; - public static final PointBuilder PROTOTYPE = new PointBuilder(); private Coordinate coordinate; diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java index 04540df27e9..03ff6a6b892 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/PolygonBuilder.java @@ -29,6 +29,8 @@ import com.vividsolutions.jts.geom.MultiPolygon; import com.vividsolutions.jts.geom.Polygon; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +41,9 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.List; +import java.util.Locale; +import java.util.Objects; /** * The 
{@link PolygonBuilder} implements the groundwork to create polygons. This contains @@ -48,6 +53,11 @@ import java.util.concurrent.atomic.AtomicBoolean; public class PolygonBuilder extends ShapeBuilder { public static final GeoShapeType TYPE = GeoShapeType.POLYGON; + public static final PolygonBuilder PROTOTYPE = new PolygonBuilder(); + + private static final Coordinate[][] EMPTY = new Coordinate[0][]; + + private Orientation orientation = Orientation.RIGHT; // line string defining the shell of the polygon private LineStringBuilder shell; @@ -56,7 +66,7 @@ public class PolygonBuilder extends ShapeBuilder { private final ArrayList holes = new ArrayList<>(); public PolygonBuilder() { - this(new ArrayList(), Orientation.RIGHT); + this(Orientation.RIGHT); } public PolygonBuilder(Orientation orientation) { @@ -64,10 +74,14 @@ public class PolygonBuilder extends ShapeBuilder { } public PolygonBuilder(ArrayList points, Orientation orientation) { - super(orientation); + this.orientation = orientation; this.shell = new LineStringBuilder().points(points); } + public Orientation orientation() { + return this.orientation; + } + public PolygonBuilder point(double longitude, double latitude) { shell.point(longitude, latitude); return this; @@ -103,6 +117,20 @@ public class PolygonBuilder extends ShapeBuilder { return this; } + /** + * @return the list of holes defined for this polygon + */ + public List holes() { + return this.holes; + } + + /** + * @return the list of points of the shell for this polygon + */ + public LineStringBuilder shell() { + return this.shell; + } + /** * Close the shell of the polygon */ @@ -175,6 +203,7 @@ public class PolygonBuilder extends ShapeBuilder { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(FIELD_TYPE, TYPE.shapeName()); + builder.field(FIELD_ORIENTATION, orientation.name().toLowerCase(Locale.ROOT)); builder.startArray(FIELD_COORDINATES); coordinatesArray(builder, params); builder.endArray(); @@ -357,8 +386,6 @@ public class PolygonBuilder extends ShapeBuilder { return result; } - private static final Coordinate[][] EMPTY = new Coordinate[0][]; - private static Coordinate[][] holes(Edge[] holes, int numHoles) { if (numHoles == 0) { return EMPTY; @@ -663,4 +690,44 @@ public class PolygonBuilder extends ShapeBuilder { } } } + + @Override + public int hashCode() { + return Objects.hash(shell, holes, orientation); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PolygonBuilder other = (PolygonBuilder) obj; + return Objects.equals(shell, other.shell) && + Objects.equals(holes, other.holes) && + Objects.equals(orientation, other.orientation); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + orientation.writeTo(out); + shell.writeTo(out); + out.writeVInt(holes.size()); + for (LineStringBuilder hole : holes) { + hole.writeTo(out); + } + } + + @Override + public PolygonBuilder readFrom(StreamInput in) throws IOException { + PolygonBuilder polyBuilder = new PolygonBuilder(Orientation.readFrom(in)); + polyBuilder.shell = LineStringBuilder.PROTOTYPE.readFrom(in); + int holes = in.readVInt(); + for (int i = 0; i < holes; i++) { + polyBuilder.hole(LineStringBuilder.PROTOTYPE.readFrom(in)); + } + return polyBuilder; + } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java 
b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index d8689ee737f..fcd8177ac6c 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -77,16 +77,10 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri /** @see com.spatial4j.core.shape.jts.JtsGeometry#index() */ protected final boolean autoIndexJtsGeometry = true;//may want to turn off once SpatialStrategy impls do it. - protected Orientation orientation = Orientation.RIGHT; - protected ShapeBuilder() { } - protected ShapeBuilder(Orientation orientation) { - this.orientation = orientation; - } - protected static Coordinate coordinate(double longitude, double latitude) { return new Coordinate(longitude, latitude); } @@ -186,22 +180,6 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return new Coordinate(in.readDouble(), in.readDouble()); } - public static Orientation orientationFromString(String orientation) { - orientation = orientation.toLowerCase(Locale.ROOT); - switch (orientation) { - case "right": - case "counterclockwise": - case "ccw": - return Orientation.RIGHT; - case "left": - case "clockwise": - case "cw": - return Orientation.LEFT; - default: - throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); - } - } - protected static Coordinate shift(Coordinate coordinate, double dateline) { if (dateline == 0) { return coordinate; @@ -408,6 +386,30 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public static final Orientation COUNTER_CLOCKWISE = Orientation.RIGHT; public static final Orientation CW = Orientation.LEFT; public static final Orientation CCW = Orientation.RIGHT; + + public void writeTo (StreamOutput out) throws IOException { + out.writeBoolean(this == Orientation.RIGHT); + } + + public static Orientation readFrom (StreamInput in) throws IOException { + return in.readBoolean() ? 
Orientation.RIGHT : Orientation.LEFT; + } + + public static Orientation fromString(String orientation) { + orientation = orientation.toLowerCase(Locale.ROOT); + switch (orientation) { + case "right": + case "counterclockwise": + case "ccw": + return Orientation.RIGHT; + case "left": + case "clockwise": + case "cw": + return Orientation.LEFT; + default: + throw new IllegalArgumentException("Unknown orientation [" + orientation + "]"); + } + } } public static final String FIELD_TYPE = "type"; @@ -498,7 +500,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri radius = Distance.parseDistance(parser.text()); } else if (FIELD_ORIENTATION.equals(fieldName)) { parser.nextToken(); - requestedOrientation = orientationFromString(parser.text()); + requestedOrientation = Orientation.fromString(parser.text()); } else { parser.nextToken(); parser.skipChildren(); @@ -524,7 +526,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri case POLYGON: return parsePolygon(node, requestedOrientation, coerce); case MULTIPOLYGON: return parseMultiPolygon(node, requestedOrientation, coerce); case CIRCLE: return parseCircle(node, radius); - case ENVELOPE: return parseEnvelope(node, requestedOrientation); + case ENVELOPE: return parseEnvelope(node); case GEOMETRYCOLLECTION: return geometryCollections; default: throw new ElasticsearchParseException("shape type [{}] not included", shapeType); @@ -550,7 +552,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri return ShapeBuilders.newCircleBuilder().center(coordinates.coordinate).radius(radius); } - protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates, final Orientation orientation) { + protected static EnvelopeBuilder parseEnvelope(CoordinateNode coordinates) { // validate the coordinate array for envelope type if (coordinates.children.size() != 2) { throw new ElasticsearchParseException("invalid number of points [{}] provided for " + @@ -564,7 +566,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri uL = new Coordinate(Math.min(uL.x, lR.x), Math.max(uL.y, lR.y)); lR = new Coordinate(Math.max(uLtmp.x, lR.x), Math.min(uLtmp.y, lR.y)); } - return ShapeBuilders.newEnvelope(orientation).topLeft(uL).bottomRight(lR); + return ShapeBuilders.newEnvelope().topLeft(uL).bottomRight(lR); } protected static void validateMultiPointNode(CoordinateNode coordinates) { @@ -684,8 +686,7 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri } XContentParser.Token token = parser.nextToken(); - GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection( (mapper == null) ? 
Orientation.RIGHT : mapper - .fieldType().orientation()); + GeometryCollectionBuilder geometryCollection = ShapeBuilders.newGeometryCollection(); while (token != XContentParser.Token.END_ARRAY) { ShapeBuilder shapeBuilder = GeoShapeType.parse(parser); geometryCollection.shape(shapeBuilder); @@ -700,15 +701,4 @@ public abstract class ShapeBuilder extends ToXContentToBytes implements NamedWri public String getWriteableName() { return type().shapeName(); } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public void writeTo(StreamOutput out) throws IOException { - } - - // NORELEASE this should be deleted as soon as all shape builders implement writable - @Override - public ShapeBuilder readFrom(StreamInput in) throws IOException { - return null; - } } diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java new file mode 100644 index 00000000000..c66e969aa3a --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilderRegistry.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.geo.builders; + +import org.elasticsearch.common.geo.ShapesAvailability; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; + +/** + * Register the shape builder prototypes with the {@link NamedWriteableRegistry} + */ +public class ShapeBuilderRegistry { + + @Inject + public ShapeBuilderRegistry(NamedWriteableRegistry namedWriteableRegistry) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, CircleBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); + } + } +} diff --git a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java index e294a9d6ef7..61d7a9cd07e 100644 --- a/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java +++ b/core/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilders.java @@ -110,15 +110,6 @@ public class ShapeBuilders { return new GeometryCollectionBuilder(); } - /** - * Create a new GeometryCollection - * - * @return a new {@link GeometryCollectionBuilder} - */ - public static GeometryCollectionBuilder newGeometryCollection(ShapeBuilder.Orientation orientation) { - return new GeometryCollectionBuilder(orientation); - } - /** * create a new Circle * @@ -136,13 +127,4 @@ public class ShapeBuilders { public static EnvelopeBuilder newEnvelope() { return new EnvelopeBuilder(); } - - /** - * create a new rectangle - * - * @return a new {@link EnvelopeBuilder} - */ - public static EnvelopeBuilder newEnvelope(ShapeBuilder.Orientation orientation) { - return new EnvelopeBuilder(orientation); - } } diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 20859e2716a..ffcb4201f4d 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -33,7 +33,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -256,13 +256,13 @@ public abstract class StreamInput extends InputStream { if (length == -1) { return null; } - return new StringAndBytesText(readBytesReference(length)); + return new Text(readBytesReference(length)); } 
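ShapeBuilderRegistry above registers one prototype per concrete builder; the readShape/writeShape helpers added to StreamInput and StreamOutput just below then dispatch through that registry by writeable name. A hedged sketch of the whole dispatch path (NamedWriteableAwareStreamInput and the no-arg NamedWriteableRegistry constructor are assumptions based on the surrounding codebase):

```java
import org.elasticsearch.common.geo.builders.PolygonBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;

public class ShapeDispatchSketch {

    // Works only for shape types whose prototype is registered; in a node,
    // ShapeBuilderRegistry performs all of these registrations once.
    public static ShapeBuilder roundTrip(ShapeBuilder shape) throws Exception {
        NamedWriteableRegistry registry = new NamedWriteableRegistry();
        registry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE);

        BytesStreamOutput out = new BytesStreamOutput();
        out.writeShape(shape); // writes getWriteableName(), then delegates to writeTo()

        StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(out.bytes()), registry);
        return in.readShape(); // looks up the prototype by name, calls its readFrom()
    }
}
```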
public Text readText() throws IOException { // use StringAndBytes so we can cache the string if its ever converted to it int length = readInt(); - return new StringAndBytesText(readBytesReference(length)); + return new Text(readBytesReference(length)); } @Nullable @@ -629,6 +629,13 @@ public abstract class StreamInput extends InputStream { return readNamedWriteable(QueryBuilder.class); } + /** + * Reads a {@link ShapeBuilder} from the current stream + */ + public ShapeBuilder readShape() throws IOException { + return readNamedWriteable(ShapeBuilder.class); + } + /** * Reads a {@link org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder} from the current stream */ diff --git a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 5f1e7623d28..e8997b8073f 100644 --- a/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/core/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -32,6 +32,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -618,6 +619,13 @@ public abstract class StreamOutput extends OutputStream { writeNamedWriteable(queryBuilder); } + /** + * Writes a {@link ShapeBuilder} to the current stream + */ + public void writeShape(ShapeBuilder shapeBuilder) throws IOException { + writeNamedWriteable(shapeBuilder); + } + /** * Writes a {@link ScoreFunctionBuilder} to the current stream */ diff --git a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java index 7191c96e33e..4fe90aed9e4 100644 --- a/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java +++ b/core/src/main/java/org/elasticsearch/common/lucene/all/AllTermQuery.java @@ -149,6 +149,10 @@ public final class AllTermQuery extends Query { return null; } final TermState state = termStates.get(context.ord); + if (state == null) { + // Term does not exist in this segment + return null; + } termsEnum.seekExact(term.bytes(), state); PostingsEnum docs = termsEnum.postings(null, PostingsEnum.PAYLOADS); assert docs != null; diff --git a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java index c1f282ac234..f7eab3da2ac 100644 --- a/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/core/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -19,21 +19,362 @@ package org.elasticsearch.common.network; +import org.elasticsearch.client.support.Headers; +import org.elasticsearch.client.transport.TransportClientNodesService; +import org.elasticsearch.client.transport.support.TransportProxyClient; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.ExtensionPoint; +import org.elasticsearch.http.HttpServer; +import org.elasticsearch.http.HttpServerTransport; +import 
org.elasticsearch.http.netty.NettyHttpServerTransport; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; +import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; +import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; +import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; +import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; +import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; +import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; +import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; +import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; +import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; +import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; +import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; +import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; +import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; +import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; +import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; +import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; +import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; +import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; +import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; +import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; +import org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; +import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; +import 
org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; +import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; +import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; +import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; +import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; +import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; +import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; +import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; +import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; +import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; +import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; +import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; +import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; +import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; +import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; +import org.elasticsearch.rest.action.bulk.RestBulkAction; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestAliasAction; +import org.elasticsearch.rest.action.cat.RestAllocationAction; +import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestFielddataAction; +import org.elasticsearch.rest.action.cat.RestHealthAction; +import org.elasticsearch.rest.action.cat.RestIndicesAction; +import org.elasticsearch.rest.action.cat.RestMasterAction; +import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.cat.RestPluginsAction; +import org.elasticsearch.rest.action.cat.RestRepositoriesAction; +import org.elasticsearch.rest.action.cat.RestSegmentsAction; +import org.elasticsearch.rest.action.cat.RestShardsAction; +import org.elasticsearch.rest.action.cat.RestSnapshotAction; +import org.elasticsearch.rest.action.cat.RestThreadPoolAction; +import org.elasticsearch.rest.action.delete.RestDeleteAction; +import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; +import org.elasticsearch.rest.action.get.RestGetAction; +import org.elasticsearch.rest.action.get.RestGetSourceAction; +import org.elasticsearch.rest.action.get.RestHeadAction; +import org.elasticsearch.rest.action.get.RestMultiGetAction; +import org.elasticsearch.rest.action.index.RestIndexAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; +import org.elasticsearch.rest.action.percolate.RestPercolateAction; +import 
org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; +import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; +import org.elasticsearch.rest.action.search.RestClearScrollAction; +import org.elasticsearch.rest.action.search.RestMultiSearchAction; +import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.rest.action.search.RestSearchScrollAction; +import org.elasticsearch.rest.action.suggest.RestSuggestAction; +import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; +import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; +import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; +import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; +import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; +import org.elasticsearch.rest.action.update.RestUpdateAction; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.elasticsearch.transport.netty.NettyTransport; + +import java.util.Arrays; +import java.util.List; /** - * + * A module to handle registering and binding all network related classes. */ public class NetworkModule extends AbstractModule { - private final NetworkService networkService; + public static final String TRANSPORT_TYPE_KEY = "transport.type"; + public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; - public NetworkModule(NetworkService networkService) { + public static final String LOCAL_TRANSPORT = "local"; + public static final String NETTY_TRANSPORT = "netty"; + + public static final String HTTP_TYPE_KEY = "http.type"; + public static final String HTTP_ENABLED = "http.enabled"; + + private static final List> builtinRestHandlers = Arrays.asList( + RestMainAction.class, + + RestNodesInfoAction.class, + RestNodesStatsAction.class, + RestNodesHotThreadsAction.class, + RestClusterStatsAction.class, + RestClusterStateAction.class, + RestClusterHealthAction.class, + RestClusterUpdateSettingsAction.class, + RestClusterGetSettingsAction.class, + RestClusterRerouteAction.class, + RestClusterSearchShardsAction.class, + RestPendingClusterTasksAction.class, + RestPutRepositoryAction.class, + RestGetRepositoriesAction.class, + RestDeleteRepositoryAction.class, + RestVerifyRepositoryAction.class, + RestGetSnapshotsAction.class, + RestCreateSnapshotAction.class, + RestRestoreSnapshotAction.class, + RestDeleteSnapshotAction.class, + RestSnapshotsStatusAction.class, + + RestIndicesExistsAction.class, + RestTypesExistsAction.class, + RestGetIndicesAction.class, + RestIndicesStatsAction.class, + RestIndicesSegmentsAction.class, + RestIndicesShardStoresAction.class, + RestGetAliasesAction.class, + RestAliasesExistAction.class, + RestIndexDeleteAliasesAction.class, + RestIndexPutAliasAction.class, + RestIndicesAliasesAction.class, + RestGetIndicesAliasesAction.class, + RestCreateIndexAction.class, + RestDeleteIndexAction.class, + RestCloseIndexAction.class, + RestOpenIndexAction.class, + + RestUpdateSettingsAction.class, + RestGetSettingsAction.class, + + RestAnalyzeAction.class, + RestGetIndexTemplateAction.class, + RestPutIndexTemplateAction.class, + RestDeleteIndexTemplateAction.class, + RestHeadIndexTemplateAction.class, + + RestPutWarmerAction.class, + RestDeleteWarmerAction.class, + RestGetWarmerAction.class, + + 
RestPutMappingAction.class, + RestGetMappingAction.class, + RestGetFieldMappingAction.class, + + RestRefreshAction.class, + RestFlushAction.class, + RestSyncedFlushAction.class, + RestForceMergeAction.class, + RestUpgradeAction.class, + RestClearIndicesCacheAction.class, + + RestIndexAction.class, + RestGetAction.class, + RestGetSourceAction.class, + RestHeadAction.class, + RestMultiGetAction.class, + RestDeleteAction.class, + org.elasticsearch.rest.action.count.RestCountAction.class, + RestSuggestAction.class, + RestTermVectorsAction.class, + RestMultiTermVectorsAction.class, + RestBulkAction.class, + RestUpdateAction.class, + RestPercolateAction.class, + RestMultiPercolateAction.class, + + RestSearchAction.class, + RestSearchScrollAction.class, + RestClearScrollAction.class, + RestMultiSearchAction.class, + RestRenderSearchTemplateAction.class, + + RestValidateQueryAction.class, + + RestExplainAction.class, + + RestRecoveryAction.class, + + // Templates API + RestGetSearchTemplateAction.class, + RestPutSearchTemplateAction.class, + RestDeleteSearchTemplateAction.class, + + // Scripts API + RestGetIndexedScriptAction.class, + RestPutIndexedScriptAction.class, + RestDeleteIndexedScriptAction.class, + + RestFieldStatsAction.class, + + // no abstract cat action + RestCatAction.class + ); + + private static final List> builtinCatHandlers = Arrays.asList( + RestAllocationAction.class, + RestShardsAction.class, + RestMasterAction.class, + RestNodesAction.class, + RestIndicesAction.class, + RestSegmentsAction.class, + // Fully qualified to prevent interference with rest.action.count.RestCountAction + org.elasticsearch.rest.action.cat.RestCountAction.class, + // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction + org.elasticsearch.rest.action.cat.RestRecoveryAction.class, + RestHealthAction.class, + org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class, + RestAliasAction.class, + RestThreadPoolAction.class, + RestPluginsAction.class, + RestFielddataAction.class, + RestNodeAttrsAction.class, + RestRepositoriesAction.class, + RestSnapshotAction.class + ); + + private final NetworkService networkService; + private final Settings settings; + private final boolean transportClient; + + private final ExtensionPoint.SelectedType transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class); + private final ExtensionPoint.SelectedType transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class); + private final ExtensionPoint.SelectedType httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class); + private final ExtensionPoint.ClassSet restHandlers = new ExtensionPoint.ClassSet<>("rest_handler", RestHandler.class); + // we must separate the cat rest handlers so RestCatAction can collect them... + private final ExtensionPoint.ClassSet catHandlers = new ExtensionPoint.ClassSet<>("cat_handler", AbstractCatAction.class); + + /** + * Creates a network module that custom networking classes can be plugged into. + * + * @param networkService A constructed network service object to bind. + * @param settings The settings for the node + * @param transportClient True if only transport classes should be allowed to be registered, false otherwise. 
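The `register*` methods defined just below this constructor are the module's extension surface. A minimal sketch of how a plugin might use them; the plugin class, `MyTransport` (assumed to be a `Transport` implementation), and the `"mytransport"` name are all invented, and the reflective `onModule` hook is assumed from this codebase's usual plugin convention:

```java
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.plugins.Plugin;

// Hypothetical plugin registering a custom transport implementation.
public class MyNetworkPlugin extends Plugin {
    @Override
    public String name() { return "my-network-plugin"; }

    @Override
    public String description() { return "registers a custom transport"; }

    // invoked reflectively for each bound module, per the plugin convention
    public void onModule(NetworkModule module) {
        module.registerTransport("mytransport", MyTransport.class);
    }
}
```

A node would then select it with `transport.type: mytransport`, i.e. the `TRANSPORT_TYPE_KEY` defined above.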
+ */ + public NetworkModule(NetworkService networkService, Settings settings, boolean transportClient) { this.networkService = networkService; + this.settings = settings; + this.transportClient = transportClient; + registerTransportService(NETTY_TRANSPORT, TransportService.class); + registerTransport(LOCAL_TRANSPORT, LocalTransport.class); + registerTransport(NETTY_TRANSPORT, NettyTransport.class); + + if (transportClient == false) { + registerHttpTransport(NETTY_TRANSPORT, NettyHttpServerTransport.class); + + for (Class catAction : builtinCatHandlers) { + catHandlers.registerExtension(catAction); + } + for (Class restAction : builtinRestHandlers) { + restHandlers.registerExtension(restAction); + } + } + } + + /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */ + public void registerTransportService(String name, Class clazz) { + transportServiceTypes.registerExtension(name, clazz); + } + + /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */ + public void registerTransport(String name, Class clazz) { + transportTypes.registerExtension(name, clazz); + } + + /** Adds an http transport implementation that can be selected by setting {@link #HTTP_TYPE_KEY}. */ + // TODO: we need another name than "http transport"....so confusing with transportClient... + public void registerHttpTransport(String name, Class clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register http transport " + clazz.getName() + " for transport client"); + } + httpTransportTypes.registerExtension(name, clazz); + } + + /** Adds an additional rest action. */ + // TODO: change this further to eliminate the middle man, ie RestController, and just register method and path here + public void registerRestHandler(Class clazz) { + if (transportClient) { + throw new IllegalArgumentException("Cannot register rest handler " + clazz.getName() + " for transport client"); + } + if (AbstractCatAction.class.isAssignableFrom(clazz)) { + catHandlers.registerExtension(clazz.asSubclass(AbstractCatAction.class)); + } else { + restHandlers.registerExtension(clazz); + } } @Override protected void configure() { bind(NetworkService.class).toInstance(networkService); + bind(NamedWriteableRegistry.class).asEagerSingleton(); + + transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, NETTY_TRANSPORT); + String defaultTransport = DiscoveryNode.localNode(settings) ? 
LOCAL_TRANSPORT : NETTY_TRANSPORT; + transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, defaultTransport); + + if (transportClient) { + bind(Headers.class).asEagerSingleton(); + bind(TransportProxyClient.class).asEagerSingleton(); + bind(TransportClientNodesService.class).asEagerSingleton(); + } else { + if (settings.getAsBoolean(HTTP_ENABLED, true)) { + bind(HttpServer.class).asEagerSingleton(); + httpTransportTypes.bindType(binder(), settings, HTTP_TYPE_KEY, NETTY_TRANSPORT); + } + bind(RestController.class).asEagerSingleton(); + catHandlers.bind(binder()); + restHandlers.bind(binder()); + } } } diff --git a/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java new file mode 100644 index 00000000000..13743cabcf6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.settings; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.component.AbstractComponent; + +import java.util.*; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * A basic setting service that can be used for per-index and per-cluster settings. + * This service offers transactional application of updates settings. + */ +public abstract class AbstractScopedSettings extends AbstractComponent { + private Settings lastSettingsApplied = Settings.EMPTY; + private final List settingUpdaters = new ArrayList<>(); + private final Map> complexMatchers = new HashMap<>(); + private final Map> keySettings = new HashMap<>(); + private final Setting.Scope scope; + + protected AbstractScopedSettings(Settings settings, Set> settingsSet, Setting.Scope scope) { + super(settings); + for (Setting entry : settingsSet) { + if (entry.getScope() != scope) { + throw new IllegalArgumentException("Setting must be a cluster setting but was: " + entry.getScope()); + } + if (entry.hasComplexMatcher()) { + complexMatchers.put(entry.getKey(), entry); + } else { + keySettings.put(entry.getKey(), entry); + } + } + this.scope = scope; + } + + public Setting.Scope getScope() { + return this.scope; + } + + /** + * Applies the given settings to all listeners and rolls back the result after application. This + * method will not change any settings but will fail if any of the settings can't be applied. 
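A hedged usage sketch of the two methods that follow, assuming a constructed `ClusterSettings` instance (`clusterSettings`) and using a key from the built-in list registered later in this change: `dryRun` validates a proposed update against every registered updater without committing anything, while `applySettings` prepares one `Runnable` per updater before running any of them, so a rejected value cannot leave consumers half-updated.

```java
// Validate first, then commit; dryRun throws if any updater rejects a value.
Settings proposed = Settings.builder()
        .put("cluster.routing.allocation.enable", "none")
        .build();
clusterSettings.dryRun(proposed);        // no side effects, only validation
clusterSettings.applySettings(proposed); // two-phase: prepare all updaters, then run them
```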
+ */ + public synchronized Settings dryRun(Settings settings) { + final Settings current = Settings.builder().put(this.settings).put(settings).build(); + final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build(); + List exceptions = new ArrayList<>(); + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + if (settingUpdater.hasChanged(current, previous)) { + settingUpdater.getValue(current, previous); + } + } catch (RuntimeException ex) { + exceptions.add(ex); + logger.debug("failed to prepareCommit settings for [{}]", ex, settingUpdater); + } + } + // here we are exhaustive and record all settings that failed. + ExceptionsHelper.rethrowAndSuppress(exceptions); + return current; + } + + /** + * Applies the given settings to all the settings consumers or to none of them. The settings + * will be merged with the node settings before they are applied while given settings override existing node + * settings. + * @param newSettings the settings to apply + * @return the unmerged applied settings + */ + public synchronized Settings applySettings(Settings newSettings) { + if (lastSettingsApplied != null && newSettings.equals(lastSettingsApplied)) { + // nothing changed in the settings, ignore + return newSettings; + } + final Settings current = Settings.builder().put(this.settings).put(newSettings).build(); + final Settings previous = Settings.builder().put(this.settings).put(this.lastSettingsApplied).build(); + try { + List applyRunnables = new ArrayList<>(); + for (SettingUpdater settingUpdater : settingUpdaters) { + try { + applyRunnables.add(settingUpdater.updater(current, previous)); + } catch (Exception ex) { + logger.warn("failed to prepareCommit settings for [{}]", ex, settingUpdater); + throw ex; + } + } + for (Runnable settingUpdater : applyRunnables) { + settingUpdater.run(); + } + } catch (Exception ex) { + logger.warn("failed to apply settings", ex); + throw ex; + } finally { + } + return lastSettingsApplied = newSettings; + } + + /** + * Adds a settings consumer with a predicate that is only evaluated at update time. + *
<p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + *
</p>
+ * @param validator an additional validator that is only applied to updates of this setting. + * This is useful to add additional validation to settings at runtime compared to at startup time. + */ + public synchronized <T> void addSettingsUpdateConsumer(Setting<T> setting, Consumer<T> consumer, Consumer<T> validator) { + if (setting != get(setting.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + setting.getKey() + "]"); + } + this.settingUpdaters.add(setting.newUpdater(consumer, logger, validator)); + } + + /** + * Adds a settings consumer that accepts the values for two settings. The consumer is only notified if one or both settings change. + *
<p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + *
</p>
+ * This method registers a compound updater that is useful if two settings depend on each other. The consumer is always provided + * with both values even if only one of the two changes. + */ + public synchronized <A, B> void addSettingsUpdateConsumer(Setting<A> a, Setting<B> b, BiConsumer<A, B> consumer) { + if (a != get(a.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + a.getKey() + "]"); + } + if (b != get(b.getKey())) { + throw new IllegalArgumentException("Setting is not registered for key [" + b.getKey() + "]"); + } + this.settingUpdaters.add(Setting.compoundUpdater(consumer, a, b, logger)); + } + + /** + * Adds a settings consumer. + *
<p>
+ * Note: Only settings registered in {@link SettingsModule} can be changed dynamically. + *
</p>
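The three `addSettingsUpdateConsumer` overloads above are the surface most callers will touch. A sketch of the common single-setting form, using one of the built-in cluster settings registered later in this change; the target field is hypothetical:

```java
// The consumer fires only after the whole batch of changed settings has validated.
clusterSettings.addSettingsUpdateConsumer(
        ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING,
        newValue -> this.concurrentRecoveries = newValue); // hypothetical field
```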
+ */ + public synchronized void addSettingsUpdateConsumer(Setting setting, Consumer consumer) { + addSettingsUpdateConsumer(setting, consumer, (s) -> {}); + } + + /** + * Transactional interface to update settings. + * @see Setting + */ + public interface SettingUpdater { + + /** + * Returns true if this updaters setting has changed with the current update + * @param current the current settings + * @param previous the previous setting + * @return true if this updaters setting has changed with the current update + */ + boolean hasChanged(Settings current, Settings previous); + + /** + * Returns the instance value for the current settings. This method is stateless and idempotent. + * This method will throw an exception if the source of this value is invalid. + */ + T getValue(Settings current, Settings previous); + + /** + * Applies the given value to the updater. This methods will actually run the update. + */ + void apply(T value, Settings current, Settings previous); + + /** + * Updates this updaters value if it has changed. + * @return true iff the value has been updated. + */ + default boolean apply(Settings current, Settings previous) { + if (hasChanged(current, previous)) { + T value = getValue(current, previous); + apply(value, current, previous); + return true; + } + return false; + } + + /** + * Returns a callable runnable that calls {@link #apply(Object, Settings, Settings)} if the settings + * actually changed. This allows to defer the update to a later point in time while keeping type safety. + * If the value didn't change the returned runnable is a noop. + */ + default Runnable updater(Settings current, Settings previous) { + if (hasChanged(current, previous)) { + T value = getValue(current, previous); + return () -> { apply(value, current, previous);}; + } + return () -> {}; + } + } + + /** + * Returns the {@link Setting} for the given key or null if the setting can not be found. + */ + public Setting get(String key) { + Setting setting = keySettings.get(key); + if (setting == null) { + for (Map.Entry> entry : complexMatchers.entrySet()) { + if (entry.getValue().match(key)) { + return entry.getValue(); + } + } + } else { + return setting; + } + return null; + } + + /** + * Returns true if the setting for the given key is dynamically updateable. Otherwise false. + */ + public boolean hasDynamicSetting(String key) { + final Setting setting = get(key); + return setting != null && setting.isDynamic(); + } + + /** + * Returns a settings object that contains all settings that are not + * already set in the given source. The diff contains either the default value for each + * setting or the settings value in the given default settings. + */ + public Settings diff(Settings source, Settings defaultSettings) { + Settings.Builder builder = Settings.builder(); + for (Setting setting : keySettings.values()) { + if (setting.exists(source) == false) { + builder.put(setting.getKey(), setting.getRaw(defaultSettings)); + } + } + return builder.build(); + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java new file mode 100644 index 00000000000..ac9631d29b1 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -0,0 +1,141 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction; +import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.cluster.InternalClusterInfoService; +import org.elasticsearch.cluster.action.index.MappingUpdatedAction; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.*; +import org.elasticsearch.cluster.service.InternalClusterService; +import org.elasticsearch.common.logging.ESLoggerFactory; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.ZenDiscovery; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; +import org.elasticsearch.index.store.IndexStoreConfig; +import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; +import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.indices.ttl.IndicesTTLService; +import org.elasticsearch.search.SearchService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +import java.util.*; + +/** + * Encapsulates all valid cluster level settings. + */ +public final class ClusterSettings extends AbstractScopedSettings { + + public ClusterSettings(Settings settings, Set> settingsSet) { + super(settings, settingsSet, Setting.Scope.CLUSTER); + } + + + @Override + public synchronized Settings applySettings(Settings newSettings) { + Settings settings = super.applySettings(newSettings); + try { + for (Map.Entry entry : settings.getAsMap().entrySet()) { + if (entry.getKey().startsWith("logger.")) { + String component = entry.getKey().substring("logger.".length()); + if ("_root".equals(component)) { + ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); + } else { + ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); + } + } + } + } catch (Exception e) { + logger.warn("failed to refresh settings for [{}]", e, "logger"); + } + return settings; + } + + /** + * Returns true if the settings is a logger setting. 
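The `applySettings` override above is what makes logger levels dynamically adjustable: any committed key under the `logger.` prefix is forwarded to the logging infrastructure, with `logger._root` addressing the root logger. A sketch of the effect, assuming a `ClusterSettings` instance:

```java
// Committing logger.* keys adjusts log levels as a side effect.
clusterSettings.applySettings(Settings.builder()
        .put("logger._root", "DEBUG")            // root logger
        .put("logger.indices.recovery", "TRACE") // a specific component
        .build());
```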
+ */ + public boolean isLoggerSetting(String key) { + return key.startsWith("logger."); + } + + + public static Set> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, + AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, + BalancedShardsAllocator.THRESHOLD_SETTING, + ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING, + ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING, + ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_INCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING, + FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, + IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, + IndicesTTLService.INDICES_TTL_INTERVAL_SETTING, + MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING, + MetaData.SETTING_READ_ONLY_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, + RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, + RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, + RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, + RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, + ThreadPool.THREADPOOL_GROUP_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING, + ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING, + DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, + InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, + SnapshotInProgressAllocationDecider.CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING, + DestructiveOperations.REQUIRES_NAME_SETTING, + DiscoverySettings.PUBLISH_TIMEOUT_SETTING, + DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING, + DiscoverySettings.COMMIT_TIMEOUT_SETTING, + DiscoverySettings.NO_MASTER_BLOCK_SETTING, + HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, + InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING, + SearchService.DEFAULT_SEARCH_TIMEOUT_SETTING, + 
ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, + TransportService.TRACE_LOG_EXCLUDE_SETTING, + TransportService.TRACE_LOG_INCLUDE_SETTING, + TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING, + ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING, + InternalClusterService.CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING, + HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, + HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, + Transport.TRANSPORT_PROFILES_SETTING, + Transport.TRANSPORT_TCP_COMPRESS))); +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java new file mode 100644 index 00000000000..236df5c567b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -0,0 +1,461 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.settings; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.action.support.ToXContentToBytes; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.MemorySizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.*; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.regex.Pattern; + +/** + */ +public class Setting extends ToXContentToBytes { + private final String key; + protected final Function defaultValue; + private final Function parser; + private final boolean dynamic; + private final Scope scope; + + /** + * Creates a new Setting instance + * @param key the settings key for this setting. + * @param defaultValue a default value function that returns the default values string representation. + * @param parser a parser that parses the string rep into a complex datatype. 
+ * @param dynamic true iff this setting can be dynamically updateable + * @param scope the scope of this setting + */ + public Setting(String key, Function defaultValue, Function parser, boolean dynamic, Scope scope) { + assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null"; + this.key = key; + this.defaultValue = defaultValue; + this.parser = parser; + this.dynamic = dynamic; + this.scope = scope; + } + + /** + * Returns the settings key or a prefix if this setting is a group setting + * @see #isGroupSetting() + */ + public final String getKey() { + return key; + } + + /** + * Returns true iff this setting is dynamically updateable, otherwise false + */ + public final boolean isDynamic() { + return dynamic; + } + + /** + * Returns the settings scope + */ + public final Scope getScope() { + return scope; + } + + /** + * Returns true iff this setting is a group setting. Group settings represent a set of settings + * rather than a single value. The key, see {@link #getKey()}, in contrast to non-group settings is a prefix like cluster.store. + * that matches all settings with this prefix. + */ + boolean isGroupSetting() { + return false; + } + + boolean hasComplexMatcher() { + return isGroupSetting(); + } + + /** + * Returns the default values string representation for this setting. + * @param settings a settings object for settings that has a default value depending on another setting if available + */ + public final String getDefault(Settings settings) { + return defaultValue.apply(settings); + } + + /** + * Returns true iff this setting is present in the given settings object. Otherwise false + */ + public final boolean exists(Settings settings) { + return settings.get(key) != null; + } + + /** + * Returns the settings value. If the setting is not present in the given settings object the default value is returned + * instead. + */ + public T get(Settings settings) { + String value = getRaw(settings); + try { + return parser.apply(value); + } catch (ElasticsearchParseException ex) { + throw new IllegalArgumentException(ex.getMessage(), ex); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", ex); + } catch (IllegalArgumentException ex) { + throw ex; + } catch (Exception t) { + throw new IllegalArgumentException("Failed to parse value [" + value + "] for setting [" + getKey() + "]", t); + } + } + + /** + * Returns the raw (string) settings value. If the setting is not present in the given settings object the default value is returned + * instead. This is useful if the value can't be parsed due to an invalid value to access the actual value. + */ + public String getRaw(Settings settings) { + return settings.get(key, defaultValue.apply(settings)); + } + + /** + * Returns true iff the given key matches the settings key or if this setting is a group setting if the + * given key is part of the settings group. 
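The constructor documented above is easiest to see with a concrete example. A purely illustrative setting (the key, default, and bound are invented; `nodeSettings` stands for whatever `Settings` instance is at hand):

```java
// An invented dynamic cluster setting, spelled out with the constructor above:
// key, default (as a string), parser, dynamic flag, and scope.
Setting<Integer> QUEUE_SIZE = new Setting<>(
        "my.queue.size",                                // key
        (s) -> "100",                                   // default value, string form
        (s) -> Setting.parseInt(s, 1, "my.queue.size"), // parser with a lower bound
        true,                                           // dynamically updatable
        Setting.Scope.CLUSTER);

int size = QUEUE_SIZE.get(nodeSettings); // falls back to the default when absent
```

The `intSetting` factory further down builds exactly this shape in one call.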
+ * @see #isGroupSetting() + */ + public boolean match(String toTest) { + return key.equals(toTest); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("key", key); + builder.field("type", scope.name()); + builder.field("dynamic", dynamic); + builder.field("is_group_setting", isGroupSetting()); + builder.field("default", defaultValue.apply(Settings.EMPTY)); + builder.endObject(); + return builder; + } + + /** + * The settings scope - settings can either be cluster settings or per index settings. + */ + public enum Scope { + CLUSTER, + INDEX; + } + + final AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger) { + return newUpdater(consumer, logger, (s) -> {}); + } + + AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) { + if (isDynamic()) { + return new Updater(consumer, logger, validator); + } else { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + } + + /** + * this is used for settings that depend on each other... see {@link org.elasticsearch.common.settings.AbstractScopedSettings#addSettingsUpdateConsumer(Setting, Setting, BiConsumer)} and it's + * usage for details. + */ + static AbstractScopedSettings.SettingUpdater> compoundUpdater(final BiConsumer consumer, final Setting
aSettting, final Setting bSetting, ESLogger logger) { + final AbstractScopedSettings.SettingUpdater aSettingUpdater = aSettting.newUpdater(null, logger); + final AbstractScopedSettings.SettingUpdater bSettingUpdater = bSetting.newUpdater(null, logger); + return new AbstractScopedSettings.SettingUpdater>() { + @Override + public boolean hasChanged(Settings current, Settings previous) { + return aSettingUpdater.hasChanged(current, previous) || bSettingUpdater.hasChanged(current, previous); + } + + @Override + public Tuple getValue(Settings current, Settings previous) { + return new Tuple<>(aSettingUpdater.getValue(current, previous), bSettingUpdater.getValue(current, previous)); + } + + @Override + public void apply(Tuple value, Settings current, Settings previous) { + consumer.accept(value.v1(), value.v2()); + } + + @Override + public String toString() { + return "CompoundUpdater for: " + aSettingUpdater + " and " + bSettingUpdater; + } + }; + } + + + private class Updater implements AbstractScopedSettings.SettingUpdater { + private final Consumer consumer; + private final ESLogger logger; + private final Consumer accept; + + public Updater(Consumer consumer, ESLogger logger, Consumer accept) { + this.consumer = consumer; + this.logger = logger; + this.accept = accept; + } + + @Override + public String toString() { + return "Updater for: " + Setting.this.toString(); + } + + @Override + public boolean hasChanged(Settings current, Settings previous) { + final String newValue = getRaw(current); + final String value = getRaw(previous); + assert isGroupSetting() == false : "group settings must override this method"; + assert value != null : "value was null but can't be unless default is null which is invalid"; + + return value.equals(newValue) == false; + } + + @Override + public T getValue(Settings current, Settings previous) { + final String newValue = getRaw(current); + final String value = getRaw(previous); + T inst = get(current); + try { + accept.accept(inst); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + value + "] to [" + newValue + "]", e); + } + return inst; + } + + @Override + public void apply(T value, Settings current, Settings previous) { + logger.info("update [{}] from [{}] to [{}]", key, getRaw(previous), getRaw(current)); + consumer.accept(value); + } + } + + + public Setting(String key, String defaultValue, Function parser, boolean dynamic, Scope scope) { + this(key, (s) -> defaultValue, parser, dynamic, scope); + } + + public static Setting floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope); + } + + public static Setting floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> { + float value = Float.parseFloat(s); + if (value < minValue) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return value; + }, dynamic, scope); + } + + public static Setting intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope); + } + + public static int parseInt(String s, int minValue, String key) { + int value = Integer.parseInt(s); + if (value < 
minValue) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return value; + } + + public static Setting intSetting(String key, int defaultValue, boolean dynamic, Scope scope) { + return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope); + } + + public static Setting boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope); + } + + public static Setting byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope); + } + + public static Setting byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> value.toString(), (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope); + } + + public static Setting positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope); + } + + public static Setting> listSetting(String key, List defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope); + } + public static Setting> listSetting(String key, Function> defaultStringValue, Function singleValueParser, boolean dynamic, Scope scope) { + Function> parser = (s) -> { + try (XContentParser xContentParser = XContentType.JSON.xContent().createParser(s)){ + XContentParser.Token token = xContentParser.nextToken(); + if (token != XContentParser.Token.START_ARRAY) { + throw new IllegalArgumentException("expected START_ARRAY but got " + token); + } + ArrayList list = new ArrayList<>(); + while ((token = xContentParser.nextToken()) !=XContentParser.Token.END_ARRAY) { + if (token != XContentParser.Token.VALUE_STRING) { + throw new IllegalArgumentException("expected VALUE_STRING but got " + token); + } + list.add(singleValueParser.apply(xContentParser.text())); + } + return list; + } catch (IOException e) { + throw new IllegalArgumentException("failed to parse array", e); + } + }; + return new Setting>(key, (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) { + private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?"); + @Override + public String getRaw(Settings settings) { + String[] array = settings.getAsArray(key, null); + return array == null ? 
defaultValue.apply(settings) : arrayToParsableString(array); + } + + public boolean match(String toTest) { + return pattern.matcher(toTest).matches(); + } + + @Override + boolean hasComplexMatcher() { + return true; + } + }; + } + + private static String arrayToParsableString(String[] array) { + try { + XContentBuilder builder = XContentBuilder.builder(XContentType.JSON.xContent()); + builder.startArray(); + for (String element : array) { + builder.value(element); + } + builder.endArray(); + return builder.string(); + } catch (IOException ex) { + throw new ElasticsearchException(ex); + } + } + + + + public static Setting groupSetting(String key, boolean dynamic, Scope scope) { + if (key.endsWith(".") == false) { + throw new IllegalArgumentException("key must end with a '.'"); + } + return new Setting(key, "", (s) -> null, dynamic, scope) { + + @Override + public boolean isGroupSetting() { + return true; + } + + @Override + public Settings get(Settings settings) { + return settings.getByPrefix(key); + } + + @Override + public boolean match(String toTest) { + return Regex.simpleMatch(key + "*", toTest); + } + + @Override + public AbstractScopedSettings.SettingUpdater newUpdater(Consumer consumer, ESLogger logger, Consumer validator) { + if (isDynamic() == false) { + throw new IllegalStateException("setting [" + getKey() + "] is not dynamic"); + } + final Setting setting = this; + return new AbstractScopedSettings.SettingUpdater() { + + @Override + public boolean hasChanged(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + return currentSettings.equals(previousSettings) == false; + } + + @Override + public Settings getValue(Settings current, Settings previous) { + Settings currentSettings = get(current); + Settings previousSettings = get(previous); + try { + validator.accept(currentSettings); + } catch (Exception | AssertionError e) { + throw new IllegalArgumentException("illegal value can't update [" + key + "] from [" + previousSettings.getAsMap() + "] to [" + currentSettings.getAsMap() + "]", e); + } + return currentSettings; + } + + @Override + public void apply(Settings value, Settings current, Settings previous) { + consumer.accept(value); + } + + @Override + public String toString() { + return "Updater for: " + setting.toString(); + } + }; + } + }; + } + + public static Setting timeSetting(String key, Function defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, defaultValue, (s) -> { + TimeValue timeValue = TimeValue.parseTimeValue(s, null, key); + if (timeValue.millis() < minValue.millis()) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return timeValue; + }, dynamic, scope); + } + + public static Setting timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) { + return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope); + } + + public static Setting timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, defaultValue, key), dynamic, scope); + } + + public static Setting doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) { + return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> { + final double d = Double.parseDouble(s); + if (d < 
minValue) { + throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue); + } + return d; + }, dynamic, scope); + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/Settings.java b/core/src/main/java/org/elasticsearch/common/settings/Settings.java index 5e083a9e740..989b05d4bf2 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -597,6 +597,8 @@ public final class Settings implements ToXContent { return result.toArray(new String[result.size()]); } + + /** * Returns group settings for the given setting prefix. */ @@ -614,6 +616,9 @@ public final class Settings implements ToXContent { if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { settingPrefix = settingPrefix + "."; } + return getGroupsInternal(settingPrefix, ignoreNonGrouped); + } + private Map getGroupsInternal(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { // we don't really care that it might happen twice Map> map = new LinkedHashMap<>(); for (Object o : settings.keySet()) { @@ -643,6 +648,16 @@ public final class Settings implements ToXContent { } return Collections.unmodifiableMap(retVal); } + /** + * Returns group settings for the given setting prefix. + */ + public Map getAsGroups() throws SettingsException { + return getAsGroups(false); + } + + public Map getAsGroups(boolean ignoreNonGrouped) throws SettingsException { + return getGroupsInternal("", ignoreNonGrouped); + } /** * Returns a parsed version. @@ -706,7 +721,7 @@ public final class Settings implements ToXContent { Builder builder = new Builder(); int numberOfSettings = in.readVInt(); for (int i = 0; i < numberOfSettings; i++) { - builder.put(in.readString(), in.readString()); + builder.put(in.readString(), in.readOptionalString()); } return builder.build(); } @@ -715,7 +730,7 @@ public final class Settings implements ToXContent { out.writeVInt(settings.getAsMap().size()); for (Map.Entry entry : settings.getAsMap().entrySet()) { out.writeString(entry.getKey()); - out.writeString(entry.getValue()); + out.writeOptionalString(entry.getValue()); } } @@ -818,6 +833,10 @@ public final class Settings implements ToXContent { return this; } + public Builder putNull(String key) { + return put(key, (String) null); + } + /** * Sets a setting with the provided setting key and class as value. * diff --git a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java index 2ae4799d9f3..8bc8ce1b651 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java +++ b/core/src/main/java/org/elasticsearch/common/settings/SettingsModule.java @@ -21,6 +21,10 @@ package org.elasticsearch.common.settings; import org.elasticsearch.common.inject.AbstractModule; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; + /** * A module that binds the provided settings to the {@link Settings} interface. 
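Two threads run through the `Settings` and `SettingsModule` changes below: the new `putNull` builder method together with the switch to `readOptionalString`/`writeOptionalString` lets an explicit null value survive serialization instead of being dropped, and `SettingsModule` gains a `registerSetting` hook that feeds the `ClusterSettings` instance it binds. A sketch of the registration side, inside the same hypothetical plugin class sketched earlier (`QUEUE_SIZE` is the invented setting from the previous sketch):

```java
// Hypothetical plugin hook; unregistered settings cannot be updated dynamically.
public void onModule(SettingsModule settingsModule) {
    settingsModule.registerSetting(QUEUE_SIZE); // registering the same key twice throws
}
```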
* @@ -30,15 +34,36 @@ public class SettingsModule extends AbstractModule { private final Settings settings; private final SettingsFilter settingsFilter; + private final Map> clusterDynamicSettings = new HashMap<>(); + public SettingsModule(Settings settings, SettingsFilter settingsFilter) { this.settings = settings; this.settingsFilter = settingsFilter; + for (Setting setting : ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) { + registerSetting(setting); + } } @Override protected void configure() { bind(Settings.class).toInstance(settings); bind(SettingsFilter.class).toInstance(settingsFilter); + final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(clusterDynamicSettings.values())); + bind(ClusterSettings.class).toInstance(clusterSettings); } -} \ No newline at end of file + + public void registerSetting(Setting setting) { + switch (setting.getScope()) { + case CLUSTER: + if (clusterDynamicSettings.containsKey(setting.getKey())) { + throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice"); + } + clusterDynamicSettings.put(setting.getKey(), setting); + break; + case INDEX: + throw new UnsupportedOperationException("not yet implemented"); + } + } + +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java index 725c7e56949..9c2f973b96e 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java +++ b/core/src/main/java/org/elasticsearch/common/settings/loader/XContentSettingsLoader.java @@ -103,9 +103,9 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { - // ignore this + serializeValue(settings, sb, path, parser, currentFieldName, true); } else { - serializeValue(settings, sb, path, parser, currentFieldName); + serializeValue(settings, sb, path, parser, currentFieldName, false); } } @@ -126,31 +126,33 @@ public abstract class XContentSettingsLoader implements SettingsLoader { } else if (token == XContentParser.Token.FIELD_NAME) { fieldName = parser.currentName(); } else if (token == XContentParser.Token.VALUE_NULL) { + serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), true); // ignore } else { - serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++)); + serializeValue(settings, sb, path, parser, fieldName + '.' + (counter++), false); } } } - private void serializeValue(Map settings, StringBuilder sb, List path, XContentParser parser, String fieldName) throws IOException { + private void serializeValue(Map settings, StringBuilder sb, List path, XContentParser parser, String fieldName, boolean isNull) throws IOException { sb.setLength(0); for (String pathEle : path) { sb.append(pathEle).append('.'); } sb.append(fieldName); String key = sb.toString(); - String currentValue = parser.text(); - String previousValue = settings.put(key, currentValue); - if (previousValue != null) { + String currentValue = isNull ? 
null : parser.text(); + + if (settings.containsKey(key)) { throw new ElasticsearchParseException( "duplicate settings key [{}] found at line number [{}], column number [{}], previous value [{}], current value [{}]", key, parser.getTokenLocation().lineNumber, parser.getTokenLocation().columnNumber, - previousValue, + settings.get(key), currentValue ); } + settings.put(key, currentValue); } } diff --git a/core/src/main/java/org/elasticsearch/common/text/BytesText.java b/core/src/main/java/org/elasticsearch/common/text/BytesText.java deleted file mode 100644 index d78055db2bd..00000000000 --- a/core/src/main/java/org/elasticsearch/common/text/BytesText.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * A {@link BytesReference} representation of the text, will always convert on the fly to a {@link String}. - */ -public class BytesText implements Text { - - private BytesReference bytes; - private int hash; - - public BytesText(BytesReference bytes) { - this.bytes = bytes; - } - - @Override - public boolean hasBytes() { - return true; - } - - @Override - public BytesReference bytes() { - return bytes; - } - - @Override - public boolean hasString() { - return false; - } - - @Override - public String string() { - // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil - if (!bytes.hasArray()) { - bytes = bytes.toBytesArray(); - } - return new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = bytes.hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java b/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java deleted file mode 100644 index 36bf76ce441..00000000000 --- a/core/src/main/java/org/elasticsearch/common/text/StringAndBytesText.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if - * the other is requests, caches the other one in a local reference so no additional conversion will be needed. - */ -public class StringAndBytesText implements Text { - - public static final Text[] EMPTY_ARRAY = new Text[0]; - - public static Text[] convertFromStringArray(String[] strings) { - if (strings.length == 0) { - return EMPTY_ARRAY; - } - Text[] texts = new Text[strings.length]; - for (int i = 0; i < strings.length; i++) { - texts[i] = new StringAndBytesText(strings[i]); - } - return texts; - } - - private BytesReference bytes; - private String text; - private int hash; - - public StringAndBytesText(BytesReference bytes) { - this.bytes = bytes; - } - - public StringAndBytesText(String text) { - this.text = text; - } - - @Override - public boolean hasBytes() { - return bytes != null; - } - - @Override - public BytesReference bytes() { - if (bytes == null) { - bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8)); - } - return bytes; - } - - @Override - public boolean hasString() { - return text != null; - } - - @Override - public String string() { - // TODO: we can optimize the conversion based on the bytes reference API similar to UnicodeUtil - if (text == null) { - if (!bytes.hasArray()) { - bytes = bytes.toBytesArray(); - } - text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); - } - return text; - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - if (hash == 0) { - hash = bytes().hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/text/StringText.java b/core/src/main/java/org/elasticsearch/common/text/StringText.java deleted file mode 100644 index 9d12096b2c0..00000000000 --- a/core/src/main/java/org/elasticsearch/common/text/StringText.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.common.text; - -import java.nio.charset.StandardCharsets; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; - -/** - * A {@link String} only representation of the text. Will always convert to bytes on the fly. - */ -public class StringText implements Text { - - public static final Text[] EMPTY_ARRAY = new Text[0]; - - public static Text[] convertFromStringArray(String[] strings) { - if (strings.length == 0) { - return EMPTY_ARRAY; - } - Text[] texts = new Text[strings.length]; - for (int i = 0; i < strings.length; i++) { - texts[i] = new StringText(strings[i]); - } - return texts; - } - - private final String text; - private int hash; - - public StringText(String text) { - this.text = text; - } - - @Override - public boolean hasBytes() { - return false; - } - - @Override - public BytesReference bytes() { - return new BytesArray(text.getBytes(StandardCharsets.UTF_8)); - } - - @Override - public boolean hasString() { - return true; - } - - @Override - public String string() { - return text; - } - - @Override - public String toString() { - return string(); - } - - @Override - public int hashCode() { - // we use bytes here so we can be consistent with other text implementations - if (hash == 0) { - hash = bytes().hashCode(); - } - return hash; - } - - @Override - public boolean equals(Object obj) { - // we use bytes here so we can be consistent with other text implementations - return bytes().equals(((Text) obj).bytes()); - } - - @Override - public int compareTo(Text text) { - return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); - } -} diff --git a/core/src/main/java/org/elasticsearch/common/text/Text.java b/core/src/main/java/org/elasticsearch/common/text/Text.java index 9fe1ea5f35d..d5b02f559f5 100644 --- a/core/src/main/java/org/elasticsearch/common/text/Text.java +++ b/core/src/main/java/org/elasticsearch/common/text/Text.java @@ -18,39 +18,101 @@ */ package org.elasticsearch.common.text; +import java.nio.charset.StandardCharsets; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; - /** - * Text represents a (usually) long text data. We use this abstraction instead of {@link String} - * so we can represent it in a more optimized manner in memory as well as serializing it over the - * network as well as converting it to json format. + * Both {@link String} and {@link BytesReference} representation of the text. Starts with one of those, and if + * the other is requests, caches the other one in a local reference so no additional conversion will be needed. 
*/ -public interface Text extends Comparable { +public final class Text implements Comparable { + + public static final Text[] EMPTY_ARRAY = new Text[0]; + + public static Text[] convertFromStringArray(String[] strings) { + if (strings.length == 0) { + return EMPTY_ARRAY; + } + Text[] texts = new Text[strings.length]; + for (int i = 0; i < strings.length; i++) { + texts[i] = new Text(strings[i]); + } + return texts; + } + + private BytesReference bytes; + private String text; + private int hash; + + public Text(BytesReference bytes) { + this.bytes = bytes; + } + + public Text(String text) { + this.text = text; + } /** - * Are bytes available without the need to be converted into bytes when calling {@link #bytes()}. + * Whether a {@link BytesReference} view of the data is already materialized. */ - boolean hasBytes(); + public boolean hasBytes() { + return bytes != null; + } /** - * The UTF8 bytes representing the the text, might be converted on the fly, see {@link #hasBytes()} + * Returns a {@link BytesReference} view of the data. */ - BytesReference bytes(); + public BytesReference bytes() { + if (bytes == null) { + bytes = new BytesArray(text.getBytes(StandardCharsets.UTF_8)); + } + return bytes; + } /** - * Is there a {@link String} representation of the text. If not, then it {@link #hasBytes()}. + * Whether a {@link String} view of the data is already materialized. */ - boolean hasString(); + public boolean hasString() { + return text != null; + } /** - * Returns the string representation of the text, might be converted to a string on the fly. + * Returns a {@link String} view of the data. */ - String string(); + public String string() { + if (text == null) { + if (!bytes.hasArray()) { + bytes = bytes.toBytesArray(); + } + text = new String(bytes.array(), bytes.arrayOffset(), bytes.length(), StandardCharsets.UTF_8); + } + return text; + } - /** - * Returns the string representation of the text, might be converted to a string on the fly. 
- */ @Override - String toString(); + public String toString() { + return string(); + } + + @Override + public int hashCode() { + if (hash == 0) { + hash = bytes().hashCode(); + } + return hash; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + return bytes().equals(((Text) obj).bytes()); + } + + @Override + public int compareTo(Text text) { + return UTF8SortedAsUnicodeComparator.utf8SortedAsUnicodeSortOrder.compare(bytes(), text.bytes()); + } } diff --git a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java index ee6371605ee..fb44c7dc9a5 100644 --- a/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java +++ b/core/src/main/java/org/elasticsearch/common/unit/TimeValue.java @@ -229,6 +229,30 @@ public class TimeValue implements Streamable { return Strings.format1Decimals(value, suffix); } + public String getStringRep() { + if (duration < 0) { + return Long.toString(duration); + } + switch (timeUnit) { + case NANOSECONDS: + return Strings.format1Decimals(duration, "nanos"); + case MICROSECONDS: + return Strings.format1Decimals(duration, "micros"); + case MILLISECONDS: + return Strings.format1Decimals(duration, "ms"); + case SECONDS: + return Strings.format1Decimals(duration, "s"); + case MINUTES: + return Strings.format1Decimals(duration, "m"); + case HOURS: + return Strings.format1Decimals(duration, "h"); + case DAYS: + return Strings.format1Decimals(duration, "d"); + default: + throw new IllegalArgumentException("unknown time unit: " + timeUnit.name()); + } + } + public static TimeValue parseTimeValue(String sValue, TimeValue defaultValue, String settingName) { settingName = Objects.requireNonNull(settingName); assert settingName.startsWith("index.") == false || MetaDataIndexUpgradeService.INDEX_TIME_SETTINGS.contains(settingName) : settingName; diff --git a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java index af8e7534692..d26485a121d 100644 --- a/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java +++ b/core/src/main/java/org/elasticsearch/common/xcontent/XContentBuilder.java @@ -53,7 +53,7 @@ import java.util.Map; */ public final class XContentBuilder implements BytesStream, Releasable { - public static enum FieldCaseConversion { + public enum FieldCaseConversion { /** * No conversion will occur. 
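TimeValue.getStringRep() above, unlike toString(), preserves the original unit so the output can be fed back through parseTimeValue without loss (toString may round, e.g. 90 seconds prints as 1.5m). A hedged sketch of the intended round trip, assuming this revision's API (the setting name is made up):

---------------------------------------------------------------------------
import org.elasticsearch.common.unit.TimeValue;

class TimeValueRoundTrip {
    public static void main(String[] args) {
        TimeValue original = TimeValue.timeValueSeconds(30);
        String rep = original.getStringRep(); // keeps the unit: "30s"
        TimeValue parsed = TimeValue.parseTimeValue(rep, null, "example.setting");
        assert parsed.millis() == original.millis(); // lossless round trip
    }
}
---------------------------------------------------------------------------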
*/ @@ -251,14 +251,7 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(XContentBuilderString name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - generator.writeFieldName(name.underscore()); - } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) { - generator.writeFieldName(name.camelCase()); - } else { - generator.writeFieldName(name.underscore()); - } - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(XContentBuilderString name, FieldCaseConversion conversion) throws IOException { @@ -273,22 +266,13 @@ public final class XContentBuilder implements BytesStream, Releasable { } public XContentBuilder field(String name) throws IOException { - if (fieldCaseConversion == FieldCaseConversion.UNDERSCORE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toUnderscoreCase(name, cachedStringBuilder); - } else if (fieldCaseConversion == FieldCaseConversion.CAMELCASE) { - if (cachedStringBuilder == null) { - cachedStringBuilder = new StringBuilder(); - } - name = Strings.toCamelCase(name, cachedStringBuilder); - } - generator.writeFieldName(name); - return this; + return field(name, fieldCaseConversion); } public XContentBuilder field(String name, FieldCaseConversion conversion) throws IOException { + if (name == null) { + throw new IllegalArgumentException("field name cannot be null"); + } if (conversion == FieldCaseConversion.UNDERSCORE) { if (cachedStringBuilder == null) { cachedStringBuilder = new StringBuilder(); diff --git a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index eeba9baa32a..a82099658ea 100644 --- a/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/core/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -23,6 +23,7 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -41,7 +42,6 @@ public class DiscoveryService extends AbstractLifecycleComponent PUBLISH_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.publish_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER); /** * sets the timeout for receiving enough acks for a specific cluster state and committing it. failing * to receive responses within this window will cause the cluster state change to be rejected. 
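The DiscoverySettings rewrite that starts here is representative of the settings overhaul running through this change: string constants plus a hand-rolled NodeSettingsService.Listener give way to typed Setting instances whose update consumers are registered with ClusterSettings. A hedged sketch of the pattern against this revision's API (component and setting names are invented for illustration):

---------------------------------------------------------------------------
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

class ExampleComponent {
    // last boolean = dynamically updatable; scope = a cluster-level setting
    static final Setting<TimeValue> EXAMPLE_TIMEOUT_SETTING = Setting.positiveTimeSetting(
            "discovery.zen.example_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);

    private volatile TimeValue exampleTimeout;

    ExampleComponent(Settings settings, ClusterSettings clusterSettings) {
        this.exampleTimeout = EXAMPLE_TIMEOUT_SETTING.get(settings);       // initial, typed value
        clusterSettings.addSettingsUpdateConsumer(EXAMPLE_TIMEOUT_SETTING, // dynamic updates
                this::setExampleTimeout);
    }

    private void setExampleTimeout(TimeValue exampleTimeout) {
        this.exampleTimeout = exampleTimeout;
    }
}
---------------------------------------------------------------------------

Parsing, defaults, and old-versus-new comparison now live in the Setting object itself, which is why the per-component listener boilerplate removed below disappears.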
*/ - public static final String COMMIT_TIMEOUT = "discovery.zen.commit_timeout"; - public static final String NO_MASTER_BLOCK = "discovery.zen.no_master_block"; - public static final String PUBLISH_DIFF_ENABLE = "discovery.zen.publish_diff.enable"; - - public static final TimeValue DEFAULT_PUBLISH_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final TimeValue DEFAULT_COMMIT_TIMEOUT = TimeValue.timeValueSeconds(30); - public static final String DEFAULT_NO_MASTER_BLOCK = "write"; - public final static int NO_MASTER_BLOCK_ID = 2; - public final static boolean DEFAULT_PUBLISH_DIFF_ENABLE = true; - - public final static ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL); - public final static ClusterBlock NO_MASTER_BLOCK_WRITES = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, false, RestStatus.SERVICE_UNAVAILABLE, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final Setting COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER); + public static final Setting NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER); + public static final Setting PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER); private volatile ClusterBlock noMasterBlock; private volatile TimeValue publishTimeout; + private volatile TimeValue commitTimeout; private volatile boolean publishDiff; @Inject - public DiscoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); - nodeSettingsService.addListener(new ApplySettings()); - this.noMasterBlock = parseNoMasterBlock(settings.get(NO_MASTER_BLOCK, DEFAULT_NO_MASTER_BLOCK)); - this.publishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, DEFAULT_PUBLISH_TIMEOUT); - this.commitTimeout = settings.getAsTime(COMMIT_TIMEOUT, new TimeValue(Math.min(DEFAULT_COMMIT_TIMEOUT.millis(), publishTimeout.millis()))); - this.publishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, DEFAULT_PUBLISH_DIFF_ENABLE); + clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_DIFF_ENABLE_SETTING, this::setPublishDiff); + clusterSettings.addSettingsUpdateConsumer(COMMIT_TIMEOUT_SETTING, this::setCommitTimeout); + clusterSettings.addSettingsUpdateConsumer(PUBLISH_TIMEOUT_SETTING, this::setPublishTimeout); + this.noMasterBlock = NO_MASTER_BLOCK_SETTING.get(settings); + this.publishTimeout = PUBLISH_TIMEOUT_SETTING.get(settings); + this.commitTimeout = COMMIT_TIMEOUT_SETTING.get(settings); + this.publishDiff = PUBLISH_DIFF_ENABLE_SETTING.get(settings); } /** @@ -88,47 +87,25 @@ public class DiscoverySettings extends AbstractComponent { return noMasterBlock; } - public boolean getPublishDiff() { return publishDiff;} - - private class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - TimeValue newPublishTimeout = settings.getAsTime(PUBLISH_TIMEOUT, null); - if (newPublishTimeout != null) { - if (newPublishTimeout.millis() != 
publishTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_TIMEOUT, publishTimeout, newPublishTimeout); - publishTimeout = newPublishTimeout; - if (settings.getAsTime(COMMIT_TIMEOUT, null) == null && commitTimeout.millis() > publishTimeout.millis()) { - logger.info("reducing default [{}] to [{}] due to publish timeout change", COMMIT_TIMEOUT, publishTimeout); - commitTimeout = publishTimeout; - } - } - } - TimeValue newCommitTimeout = settings.getAsTime(COMMIT_TIMEOUT, null); - if (newCommitTimeout != null) { - if (newCommitTimeout.millis() != commitTimeout.millis()) { - logger.info("updating [{}] from [{}] to [{}]", COMMIT_TIMEOUT, commitTimeout, newCommitTimeout); - commitTimeout = newCommitTimeout; - } - } - String newNoMasterBlockValue = settings.get(NO_MASTER_BLOCK); - if (newNoMasterBlockValue != null) { - ClusterBlock newNoMasterBlock = parseNoMasterBlock(newNoMasterBlockValue); - if (newNoMasterBlock != noMasterBlock) { - noMasterBlock = newNoMasterBlock; - } - } - Boolean newPublishDiff = settings.getAsBoolean(PUBLISH_DIFF_ENABLE, null); - if (newPublishDiff != null) { - if (newPublishDiff != publishDiff) { - logger.info("updating [{}] from [{}] to [{}]", PUBLISH_DIFF_ENABLE, publishDiff, newPublishDiff); - publishDiff = newPublishDiff; - } - } - } + private void setNoMasterBlock(ClusterBlock noMasterBlock) { + this.noMasterBlock = noMasterBlock; } - private ClusterBlock parseNoMasterBlock(String value) { + private void setPublishDiff(boolean publishDiff) { + this.publishDiff = publishDiff; + } + + private void setPublishTimeout(TimeValue publishTimeout) { + this.publishTimeout = publishTimeout; + } + + private void setCommitTimeout(TimeValue commitTimeout) { + this.commitTimeout = commitTimeout; + } + + public boolean getPublishDiff() { return publishDiff;} + + private static ClusterBlock parseNoMasterBlock(String value) { switch (value) { case "all": return NO_MASTER_BLOCK_ALL; diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 03111d141ef..8849a849f97 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -39,6 +39,8 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.Discovery; @@ -55,7 +57,6 @@ import org.elasticsearch.discovery.zen.ping.ZenPingService; import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; @@ -74,7 +75,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds; */ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider { - public final static String SETTING_REJOIN_ON_MASTER_GONE = "discovery.zen.rejoin_on_master_gone"; + public final static Setting 
REJOIN_ON_MASTER_GONE_SETTING = Setting.boolSetting("discovery.zen.rejoin_on_master_gone", true, true, Setting.Scope.CLUSTER); public final static String SETTING_PING_TIMEOUT = "discovery.zen.ping_timeout"; public final static String SETTING_JOIN_TIMEOUT = "discovery.zen.join_timeout"; public final static String SETTING_JOIN_RETRY_ATTEMPTS = "discovery.zen.join_retry_attempts"; @@ -139,7 +140,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Inject public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, - TransportService transportService, final ClusterService clusterService, NodeSettingsService nodeSettingsService, + TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, ElectMasterService electMasterService, DiscoverySettings discoverySettings) { super(settings); @@ -160,7 +161,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.masterElectionFilterClientNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_CLIENT, true); this.masterElectionFilterDataNodes = settings.getAsBoolean(SETTING_MASTER_ELECTION_FILTER_DATA, false); this.masterElectionWaitForJoinsTimeout = settings.getAsTime(SETTING_MASTER_ELECTION_WAIT_FOR_JOINS_TIMEOUT, TimeValue.timeValueMillis(joinTimeout.millis() / 2)); - this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); + this.rejoinOnMasterGone = REJOIN_ON_MASTER_GONE_SETTING.get(settings); if (this.joinRetryAttempts < 1) { throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); @@ -171,7 +172,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen logger.debug("using ping_timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", this.pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING, this::handleMinimumMasterNodesChanged, (value) -> { + final ClusterState clusterState = clusterService.state(); + int masterNodes = clusterState.nodes().masterNodes().size(); + if (value > masterNodes) { + throw new IllegalArgumentException("cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey() + " to more than the current master nodes count [" + masterNodes + "]"); + } + }); + clusterSettings.addSettingsUpdateConsumer(REJOIN_ON_MASTER_GONE_SETTING, this::setRejoinOnMasterGone); this.masterFD = new MasterFaultDetection(settings, threadPool, transportService, clusterName, clusterService); this.masterFD.addListener(new MasterNodeFailureListener()); @@ -306,6 +314,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen return clusterJoinsCounter.get() > 0; } + private void setRejoinOnMasterGone(boolean rejoin) { + this.rejoinOnMasterGone = rejoin; + } + /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */ @@ -824,8 +836,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - void handleJoinRequest(final DiscoveryNode node, final MembershipAction.JoinCallback callback) { - + void handleJoinRequest(final DiscoveryNode node, final ClusterState state, final MembershipAction.JoinCallback callback) { if
(!transportService.addressSupported(node.address().getClass())) { // TODO, what should we do now? Maybe inform that node that its crap? logger.warn("received a wrong address type from [{}], ignoring...", node); @@ -837,7 +848,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // Sanity check: maybe we don't end up here, because serialization may have failed. if (node.getVersion().before(minimumNodeJoinVersion)) { callback.onFailure( - new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") + new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]") ); return; } @@ -847,7 +858,13 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen // validate the join request, will throw a failure if it fails, which will get back to the // node calling the join request - membership.sendValidateJoinRequestBlocking(node, joinTimeout); + try { + membership.sendValidateJoinRequestBlocking(node, state, joinTimeout); + } catch (Throwable e) { + logger.warn("failed to validate incoming join request from node [{}]", node); + callback.onFailure(new IllegalStateException("failure when sending a validation request to node", e)); + return; + } nodeJoinController.handleJoinRequest(node, callback); } } @@ -1027,7 +1044,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen private class MembershipListener implements MembershipAction.MembershipListener { @Override public void onJoin(DiscoveryNode node, MembershipAction.JoinCallback callback) { - handleJoinRequest(node, callback); + handleJoinRequest(node, clusterService.state(), callback); } @Override @@ -1139,26 +1156,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - int minimumMasterNodes = settings.getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes()); - if (minimumMasterNodes != ZenDiscovery.this.electMaster.minimumMasterNodes()) { - logger.info("updating {} from [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, - ZenDiscovery.this.electMaster.minimumMasterNodes(), minimumMasterNodes); - handleMinimumMasterNodesChanged(minimumMasterNodes); - } - - boolean rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone); - if (rejoinOnMasterGone != ZenDiscovery.this.rejoinOnMasterGone) { - logger.info("updating {} from [{}] to [{}]", SETTING_REJOIN_ON_MASTER_GONE, ZenDiscovery.this.rejoinOnMasterGone, rejoinOnMasterGone); - ZenDiscovery.this.rejoinOnMasterGone = rejoinOnMasterGone; - } - } - } - - /** * All control of the join thread should happen under the cluster state update task thread. 
* This is important to make sure that the background joining process is always in sync with any cluster state updates diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java index 9164a85388a..9cca1edfc5e 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/elect/ElectMasterService.java @@ -22,11 +22,10 @@ package org.elasticsearch.discovery.zen.elect; import com.carrotsearch.hppc.ObjectContainer; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; @@ -41,23 +40,7 @@ import java.util.List; */ public class ElectMasterService extends AbstractComponent { - public static final String DISCOVERY_ZEN_MINIMUM_MASTER_NODES = "discovery.zen.minimum_master_nodes"; - public static final Validator DISCOVERY_ZEN_MINIMUM_MASTER_NODES_VALIDATOR = new Validator() { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - int intValue; - try { - intValue = Integer.parseInt(value); - } catch (NumberFormatException ex) { - return "cannot parse value [" + value + "] as an integer"; - } - int masterNodes = clusterState.nodes().masterNodes().size(); - if (intValue > masterNodes) { - return "cannot set " + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES + " to more than the current master nodes count [" + masterNodes + "]"; - } - return null; - } - }; + public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, true, Setting.Scope.CLUSTER); // This is the minimum version a master needs to be on, otherwise it gets ignored // This is based on the minimum compatible version of the current version this node is on @@ -70,7 +53,7 @@ public class ElectMasterService extends AbstractComponent { public ElectMasterService(Settings settings, Version version) { super(settings); this.minMasterVersion = version.minimumCompatibilityVersion(); - this.minimumMasterNodes = settings.getAsInt(DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1); + this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings); logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes); } diff --git a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 4260b992ddb..5a96addc842 100644 --- a/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/core/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen.membership; import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; @@ -88,10 +89,6 @@ public class MembershipAction 
extends AbstractComponent { transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } - public void sendJoinRequest(DiscoveryNode masterNode, DiscoveryNode node) { - transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME); - } - public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); @@ -100,8 +97,8 @@ public class MembershipAction extends AbstractComponent { /** * Validates the join request, throwing a failure if it failed. */ - public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) { - transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME) + public void sendValidateJoinRequestBlocking(DiscoveryNode node, ClusterState state, TimeValue timeout) { + transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(state), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -156,9 +153,26 @@ public class MembershipAction extends AbstractComponent { } } - public static class ValidateJoinRequest extends TransportRequest { + class ValidateJoinRequest extends TransportRequest { + private ClusterState state; - public ValidateJoinRequest() { + ValidateJoinRequest() { + } + + ValidateJoinRequest(ClusterState state) { + this.state = state; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + this.state = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + this.state.writeTo(out); } } diff --git a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 3a1b430f98b..93e95dfaa96 100644 --- a/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/core/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -21,7 +21,12 @@ package org.elasticsearch.env; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentInfos; -import org.apache.lucene.store.*; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.LockObtainFailedException; +import org.apache.lucene.store.NativeFSLockFactory; +import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -31,6 +36,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -38,11 +44,25 @@ import org.elasticsearch.index.shard.ShardId; import 
org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; +import org.elasticsearch.monitor.jvm.JvmInfo; import java.io.Closeable; import java.io.IOException; -import java.nio.file.*; -import java.util.*; +import java.nio.file.AtomicMoveNotSupportedException; +import java.nio.file.DirectoryStream; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -145,7 +165,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { for (int dirIndex = 0; dirIndex < environment.dataWithClusterFiles().length; dirIndex++) { Path dir = environment.dataWithClusterFiles()[dirIndex].resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId)); Files.createDirectories(dir); - + try (Directory luceneDir = FSDirectory.open(dir, NativeFSLockFactory.INSTANCE)) { logger.trace("obtaining node lock on {} ...", dir.toAbsolutePath()); try { @@ -187,6 +207,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } maybeLogPathDetails(); + maybeLogHeapDetails(); if (settings.getAsBoolean(SETTING_ENABLE_LUCENE_SEGMENT_INFOS_TRACE, false)) { SegmentInfos.setInfoStream(System.out); @@ -274,6 +295,13 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } } + private void maybeLogHeapDetails() { + JvmInfo jvmInfo = JvmInfo.jvmInfo(); + ByteSizeValue maxHeapSize = jvmInfo.getMem().getHeapMax(); + String useCompressedOops = jvmInfo.useCompressedOops(); + logger.info("heap size [{}], compressed ordinary object pointers [{}]", maxHeapSize, useCompressedOops); + } + private static String toString(Collection items) { StringBuilder b = new StringBuilder(); for(String item : items) { @@ -811,7 +839,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { // Sanity check: assert Integer.parseInt(shardPath.getName(count-1).toString()) >= 0; assert "indices".equals(shardPath.getName(count-3).toString()); - + return shardPath.getParent().getParent().getParent(); } } diff --git a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java index e83ec695a96..5e410fb6d53 100644 --- a/core/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/core/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -227,7 +227,7 @@ public class GatewayService extends AbstractLifecycleComponent i // automatically generate a UID for the metadata if we need to metaDataBuilder.generateClusterUuidIfNeeded(); - if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) { + if (MetaData.SETTING_READ_ONLY_SETTING.get(recoveredState.metaData().settings()) || MetaData.SETTING_READ_ONLY_SETTING.get(currentState.metaData().settings())) { blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK); } diff --git a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java 
b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java index e560b4458b7..79bfbdac8c2 100644 --- a/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java @@ -20,6 +20,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,8 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; import java.util.*; +import java.util.stream.Collectors; /** * The primary shard allocator allocates primary shard that were not created as @@ -39,6 +42,7 @@ import java.util.*; */ public abstract class PrimaryShardAllocator extends AbstractComponent { + @Deprecated public static final String INDEX_RECOVERY_INITIAL_SHARDS = "index.recovery.initial_shards"; private final String initialShards; @@ -56,13 +60,21 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); while (unassignedIterator.hasNext()) { - ShardRouting shard = unassignedIterator.next(); + final ShardRouting shard = unassignedIterator.next(); - if (needToFindPrimaryCopy(shard) == false) { + if (shard.primary() == false) { continue; } - AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); + final IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + final IndexSettings indexSettings = new IndexSettings(indexMetaData, settings, Collections.emptyList()); + + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { + // when we create a fresh index + continue; + } + + final AsyncShardFetch.FetchResult shardState = fetchData(shard, allocation); if (shardState.hasData() == false) { logger.trace("{}: ignoring allocation, still fetching shard started state", shard); allocation.setHasPendingAsyncFetch(); @@ -70,25 +82,50 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { continue; } - IndexMetaData indexMetaData = metaData.index(shard.getIndex()); - Settings indexSettings = Settings.builder().put(settings).put(indexMetaData.getSettings()).build(); + final Set lastActiveAllocationIds = indexMetaData.activeAllocationIds(shard.id()); + final boolean snapshotRestore = shard.restoreSource() != null; + final boolean recoverOnAnyNode = recoverOnAnyNode(indexSettings); - NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexSettings), allocation.getIgnoreNodes(shard.shardId()), shardState); - logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + final NodesAndVersions nodesAndVersions; + final boolean enoughAllocationsFound; - if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) { - // if we are restoring this shard we still can allocate - if (shard.restoreSource() == null) { + if (lastActiveAllocationIds.isEmpty()) { + assert indexSettings.getIndexVersionCreated().before(Version.V_3_0_0) : "trying to 
allocate a primary with an empty allocation id set, but index is new"; + // when we load an old index (after upgrading cluster) or restore a snapshot of an old index + // fall back to old version-based allocation mode + // Note that once the shard has been active, lastActiveAllocationIds will be non-empty + nodesAndVersions = buildNodesAndVersions(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState); + if (snapshotRestore || recoverOnAnyNode) { + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + } else { + enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(shard, indexMetaData, nodesAndVersions); + } + logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), Version.V_3_0_0, nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion); + } else { + assert lastActiveAllocationIds.isEmpty() == false; + // use allocation ids to select nodes + nodesAndVersions = buildAllocationIdBasedNodes(shard, snapshotRestore || recoverOnAnyNode, + allocation.getIgnoreNodes(shard.shardId()), lastActiveAllocationIds, shardState); + enoughAllocationsFound = nodesAndVersions.allocationsFound > 0; + logger.debug("[{}][{}]: found {} allocations of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, lastActiveAllocationIds); + } + + if (enoughAllocationsFound == false) { + if (snapshotRestore) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource()); + } else if (recoverOnAnyNode) { + // let BalancedShardsAllocator take care of allocating this shard + logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id()); + } else { // we can't really allocate, so ignore it and continue unassignedIterator.removeAndIgnore(); logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound); - } else { - logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.restoreSource()); } continue; } - NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions); + final NodesToAllocate nodesToAllocate = buildNodesToAllocate(shard, allocation, nodesAndVersions.nodes); if (nodesToAllocate.yesNodes.isEmpty() == false) { DiscoveryNode node = nodesToAllocate.yesNodes.get(0); logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, node); @@ -109,63 +146,99 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Does the shard need to find a primary copy? + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have an allocation id matching + * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but + * entries with matching allocation id are always at the front of the list.
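The javadoc above states the ordering contract of buildAllocationIdBasedNodes. A tiny standalone sketch of that contract (hypothetical names, plain strings for node ids): copies whose allocation id is in the in-sync set always sort first, and stale copies are considered at all only when matchAnyShard is true:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

class AllocationIdOrdering {
    // Returns candidate node ids: in-sync copies first, stale copies (if allowed) after.
    static List<String> orderCandidates(Map<String, String> allocationIdByNode,
                                        Set<String> lastActiveIds, boolean matchAnyShard) {
        List<String> matching = new ArrayList<>();
        List<String> stale = new ArrayList<>();
        for (Map.Entry<String, String> entry : allocationIdByNode.entrySet()) {
            if (lastActiveIds.contains(entry.getValue())) {
                matching.add(entry.getKey());   // holds an in-sync copy
            } else if (matchAnyShard) {
                stale.add(entry.getKey());      // stale copy, acceptable as last resort
            }
        }
        matching.addAll(stale);
        return matching;
    }
}
---------------------------------------------------------------------------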
*/ - boolean needToFindPrimaryCopy(ShardRouting shard) { - if (shard.primary() == false) { - return false; + protected NodesAndVersions buildAllocationIdBasedNodes(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, + Set lastActiveAllocationIds, AsyncShardFetch.FetchResult shardState) { + List matchingNodes = new ArrayList<>(); + List nonMatchingNodes = new ArrayList<>(); + long highestVersion = -1; + for (TransportNodesListGatewayStartedShards.NodeGatewayStartedShards nodeShardState : shardState.getData().values()) { + DiscoveryNode node = nodeShardState.getNode(); + String allocationId = nodeShardState.allocationId(); + + if (ignoreNodes.contains(node.id())) { + continue; + } + + if (nodeShardState.storeException() == null) { + if (allocationId == null && nodeShardState.version() != -1) { + // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard + allocationId = "_n/a_"; + } + + logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId); + } else { + logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId); + allocationId = null; + } + + if (allocationId != null) { + if (lastActiveAllocationIds.contains(allocationId)) { + matchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } else if (matchAnyShard) { + nonMatchingNodes.add(node); + highestVersion = Math.max(highestVersion, nodeShardState.version()); + } + } } - // this is an API allocation, ignore since we know there is no data... - if (shard.allocatedPostIndexCreate() == false) { - return false; - } + List nodes = new ArrayList<>(); + nodes.addAll(matchingNodes); + nodes.addAll(nonMatchingNodes); - return true; + if (logger.isTraceEnabled()) { + logger.trace("{} candidates for allocation: {}", shard, nodes.stream().map(DiscoveryNode::name).collect(Collectors.joining(", "))); + } + return new NodesAndVersions(nodes, nodes.size(), highestVersion); } - private boolean isEnoughAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { + /** + * used by old version-based allocation + */ + private boolean isEnoughVersionBasedAllocationsFound(ShardRouting shard, IndexMetaData indexMetaData, NodesAndVersions nodesAndVersions) { // check if the counts meets the minimum set int requiredAllocation = 1; // if we restore from a repository one copy is more then enough - if (shard.restoreSource() == null) { - try { - String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); - if ("quorum".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; - } - } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 2) { - requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); - } - } else if ("one".equals(initialShards)) { - requiredAllocation = 1; - } else if ("full".equals(initialShards) || "all".equals(initialShards)) { - requiredAllocation = indexMetaData.getNumberOfReplicas() + 1; - } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { - if (indexMetaData.getNumberOfReplicas() > 1) { - requiredAllocation = 
indexMetaData.getNumberOfReplicas(); - } - } else { - requiredAllocation = Integer.parseInt(initialShards); + try { + String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards)); + if ("quorum".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1; } - } catch (Exception e) { - logger.warn("[{}][{}] failed to derived initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); + } else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 2) { + requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2); + } + } else if ("one".equals(initialShards)) { + requiredAllocation = 1; + } else if ("full".equals(initialShards) || "all".equals(initialShards)) { + requiredAllocation = indexMetaData.getNumberOfReplicas() + 1; + } else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) { + if (indexMetaData.getNumberOfReplicas() > 1) { + requiredAllocation = indexMetaData.getNumberOfReplicas(); + } + } else { + requiredAllocation = Integer.parseInt(initialShards); } + } catch (Exception e) { + logger.warn("[{}][{}] failed to derive initial_shards from value {}, ignore allocation for {}", shard.index(), shard.id(), initialShards, shard); } return nodesAndVersions.allocationsFound >= requiredAllocation; } /** - * Based on the nodes and versions, build the list of yes/no/throttle nodes that the shard applies to. + * Split the list of nodes into lists of yes/no/throttle nodes based on allocation deciders */ - private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, NodesAndVersions nodesAndVersions) { List yesNodes = new ArrayList<>(); List throttledNodes = new ArrayList<>(); List noNodes = new ArrayList<>(); - for (DiscoveryNode discoNode : nodesAndVersions.nodes) { + private NodesToAllocate buildNodesToAllocate(ShardRouting shard, RoutingAllocation allocation, List nodes) { + for (DiscoveryNode discoNode : nodes) { RoutingNode node = allocation.routingNodes().node(discoNode.id()); if (node == null) { continue; @@ -184,9 +257,11 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { } /** - * Builds a list of nodes and version + * Builds a list of nodes. If matchAnyShard is set to false, only nodes that have the highest shard version + * are added to the list. Otherwise, any node that has a shard is added to the list, but entries with highest + * version are always at the front of the list.
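The legacy index.recovery.initial_shards handling kept above (now reached only for pre-3.0 indices without allocation ids) reduces to a small table of required copy counts. A standalone restatement of that arithmetic (a sketch only; the real method reads the value from index and node settings and logs a warning on parse failure):

---------------------------------------------------------------------------
class InitialShardsPolicy {
    // How many started copies must be found before the primary may be allocated.
    static int requiredCopies(String initialShards, int replicas) {
        switch (initialShards) {
            case "quorum":
                return replicas > 1 ? (1 + replicas) / 2 + 1 : 1;
            case "quorum-1":
            case "half":
                return replicas > 2 ? (1 + replicas) / 2 : 1;
            case "one":
                return 1;
            case "full":
            case "all":
                return replicas + 1;
            case "full-1":
            case "all-1":
                return replicas > 1 ? replicas : 1;
            default:
                return Integer.parseInt(initialShards); // explicit count
        }
    }
}
---------------------------------------------------------------------------

With 2 replicas, for example: quorum requires 2 found copies, all requires 3, and half stays at the default of 1.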
*/ - NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean recoveryOnAnyNode, Set ignoreNodes, + NodesAndVersions buildNodesAndVersions(ShardRouting shard, boolean matchAnyShard, Set ignoreNodes, AsyncShardFetch.FetchResult shardState) { final Map nodesWithVersion = new HashMap<>(); int numberOfAllocationsFound = 0; @@ -208,20 +283,15 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { version = -1; } - if (recoveryOnAnyNode) { - numberOfAllocationsFound++; - if (version > highestVersion) { - highestVersion = version; - } - // We always put the node without clearing the map - nodesWithVersion.put(node, version); - } else if (version != -1) { + if (version != -1) { numberOfAllocationsFound++; // If we've found a new "best" candidate, clear the // current candidates and add it if (version > highestVersion) { highestVersion = version; - nodesWithVersion.clear(); + if (matchAnyShard == false) { + nodesWithVersion.clear(); + } nodesWithVersion.put(node, version); } else if (version == highestVersion) { // If the candidate is the same, add it to the @@ -258,9 +328,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent { * Return {@code true} if the index is configured to allow shards to be * recovered on any node */ - private boolean recoverOnAnyNode(Settings idxSettings) { - return IndexMetaData.isOnSharedFilesystem(idxSettings) && - idxSettings.getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); + private boolean recoverOnAnyNode(IndexSettings indexSettings) { + return indexSettings.isOnSharedFilesystem() + && indexSettings.getSettings().getAsBoolean(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false); } protected abstract AsyncShardFetch.FetchResult fetchData(ShardRouting shard, RoutingAllocation allocation); diff --git a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java index c87f4d94755..0b5f2bc58d9 100644 --- a/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java +++ b/core/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java @@ -24,6 +24,8 @@ import com.carrotsearch.hppc.ObjectLongMap; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectLongCursor; import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -56,6 +58,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { */ public boolean processExistingRecoveries(RoutingAllocation allocation) { boolean changed = false; + MetaData metaData = allocation.metaData(); for (RoutingNodes.RoutingNodesIterator nodes = allocation.routingNodes().nodes(); nodes.hasNext(); ) { nodes.next(); for (RoutingNodes.RoutingNodeIterator it = nodes.nodeShards(); it.hasNext(); ) { @@ -69,8 +72,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { if (shard.relocatingNodeId() != null) { continue; } + // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... 
- if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } @@ -114,6 +119,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { boolean changed = false; final RoutingNodes routingNodes = allocation.routingNodes(); final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); + MetaData metaData = allocation.metaData(); while (unassignedIterator.hasNext()) { ShardRouting shard = unassignedIterator.next(); if (shard.primary()) { @@ -121,7 +127,8 @@ public abstract class ReplicaShardAllocator extends AbstractComponent { } // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one... - if (shard.allocatedPostIndexCreate() == false) { + IndexMetaData indexMetaData = metaData.index(shard.getIndex()); + if (shard.allocatedPostIndexCreate(indexMetaData) == false) { continue; } diff --git a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index d91b4bd8cdd..539ac924262 100644 --- a/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/core/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -139,7 +139,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction Store.tryOpenIndex(shardPath.resolveIndex()); } catch (Exception exception) { logger.trace("{} can't open index for shard [{}] in path [{}]", exception, shardId, shardStateMetaData, (shardPath != null) ? shardPath.resolveIndex() : ""); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, exception); + String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId, exception); } } // old shard metadata doesn't have the actual index UUID so we need to check if the actual uuid in the metadata @@ -149,11 +150,12 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction logger.warn("{} shard state info found but indexUUID didn't match expected [{}] actual [{}]", shardId, indexUUID, shardStateMetaData.indexUUID); } else { logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData); - return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version); + String allocationId = shardStateMetaData.allocationId != null ? 
shardStateMetaData.allocationId.getId() : null; + return new NodeGatewayStartedShards(clusterService.localNode(), shardStateMetaData.version, allocationId); } } logger.trace("{} no local shard info found", shardId); - return new NodeGatewayStartedShards(clusterService.localNode(), -1); + return new NodeGatewayStartedShards(clusterService.localNode(), -1, null); } catch (Exception e) { throw new ElasticsearchException("failed to load started shards", e); } @@ -277,17 +279,19 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public static class NodeGatewayStartedShards extends BaseNodeResponse { private long version = -1; + private String allocationId = null; private Throwable storeException = null; public NodeGatewayStartedShards() { } - public NodeGatewayStartedShards(DiscoveryNode node, long version) { - this(node, version, null); + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId) { + this(node, version, allocationId, null); } - public NodeGatewayStartedShards(DiscoveryNode node, long version, Throwable storeException) { + public NodeGatewayStartedShards(DiscoveryNode node, long version, String allocationId, Throwable storeException) { super(node); this.version = version; + this.allocationId = allocationId; this.storeException = storeException; } @@ -295,6 +299,10 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction return this.version; } + public String allocationId() { + return this.allocationId; + } + public Throwable storeException() { return this.storeException; } @@ -303,16 +311,17 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction public void readFrom(StreamInput in) throws IOException { super.readFrom(in); version = in.readLong(); + allocationId = in.readOptionalString(); if (in.readBoolean()) { storeException = in.readThrowable(); } - } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(version); + out.writeOptionalString(allocationId); if (storeException != null) { out.writeBoolean(true); out.writeThrowable(storeException); diff --git a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java b/core/src/main/java/org/elasticsearch/http/HttpServerModule.java deleted file mode 100644 index 49d67369643..00000000000 --- a/core/src/main/java/org/elasticsearch/http/HttpServerModule.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
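The NodeGatewayStartedShards change above threads the allocation id through the wire format as an optional string between the version and the store exception, with readFrom and writeTo updated in lockstep. A toy model of the optional-field idiom (a sketch using java.io primitives, not Elasticsearch's actual StreamInput/StreamOutput encoding): a presence flag followed by the value when present:

---------------------------------------------------------------------------
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class OptionalStringCodec {
    // Writer: presence flag first, value only when present.
    static void write(DataOutput out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    // Reader: mirrors the writer exactly, or the stream desynchronizes.
    static String read(DataInput in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}
---------------------------------------------------------------------------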
- */ - -package org.elasticsearch.http; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.netty.NettyHttpServerTransport; - -import java.util.Objects; - -/** - * - */ -public class HttpServerModule extends AbstractModule { - - private final Settings settings; - private final ESLogger logger; - - private Class httpServerTransportClass; - - public HttpServerModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - this.httpServerTransportClass = NettyHttpServerTransport.class; - } - - @SuppressWarnings({"unchecked"}) - @Override - protected void configure() { - bind(HttpServerTransport.class).to(httpServerTransportClass).asEagerSingleton(); - bind(HttpServer.class).asEagerSingleton(); - } - - public void setHttpServerTransport(Class httpServerTransport, String source) { - Objects.requireNonNull(httpServerTransport, "Configured http server transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - logger.info("Using [{}] as http transport, overridden by [{}]", httpServerTransportClass.getName(), source); - this.httpServerTransportClass = httpServerTransport; - } -} diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java index 10008c76a54..4bcbf4079c0 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/HttpPipeliningHandler.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.jboss.netty.channel.*; diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java index 6b713a08020..622a3e6ac9f 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedDownstreamChannelEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.*; /** diff --git a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java index 7343b29b6c5..cc47b5be320 100644 --- a/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java +++ b/core/src/main/java/org/elasticsearch/http/netty/pipelining/OrderedUpstreamMessageEvent.java @@ -1,5 +1,27 @@ package org.elasticsearch.http.netty.pipelining; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +// this file is from netty-http-pipelining, under apache 2.0 license +// see github.com/typesafehub/netty-http-pipelining + import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.UpstreamMessageEvent; diff --git a/core/src/main/java/org/elasticsearch/index/IndexService.java b/core/src/main/java/org/elasticsearch/index/IndexService.java index 92ca00231b5..a6b66742c55 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexService.java +++ b/core/src/main/java/org/elasticsearch/index/IndexService.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; @@ -48,7 +47,13 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; -import org.elasticsearch.index.shard.*; +import org.elasticsearch.index.shard.IndexEventListener; +import org.elasticsearch.index.shard.IndexSearcherWrapper; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShadowIndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardNotFoundException; +import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; @@ -73,7 +78,7 @@ import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; /** * */ -public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable{ +public final class IndexService extends AbstractIndexComponent implements IndexComponent, Iterable { private final IndexEventListener eventListener; private final AnalysisService analysisService; @@ -93,7 +98,6 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private final AtomicBoolean deleted = new AtomicBoolean(false); private final IndexSettings indexSettings; - @Inject public IndexService(IndexSettings indexSettings, NodeEnvironment nodeEnv, SimilarityService similarityService, ShardStoreDeleter shardStoreDeleter, @@ -146,7 +150,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC */ @Nullable public IndexShard getShardOrNull(int shardId) { - return shards.get(shardId); + return shards.get(shardId); } /** @@ -160,13 +164,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC return indexShard; } - public Set shardIds() { return shards.keySet(); } + public Set shardIds() { + return shards.keySet(); + } public IndexCache cache() { return indexCache; } - public IndexFieldDataService fieldData() { return indexFieldData; } + public IndexFieldDataService fieldData() { + return indexFieldData; + } public AnalysisService analysisService() { return this.analysisService; @@ -207,7 +215,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC private long getAvgShardSizeInBytes() throws IOException { long sum = 0; int count = 0; - for(IndexShard indexShard : this) { + for (IndexShard indexShard : this) { sum += 
indexShard.store().stats().sizeInBytes(); count++; } @@ -254,17 +262,17 @@ public final class IndexService extends AbstractIndexComponent implements IndexC // TODO: we should, instead, hold a "bytes reserved" of how large we anticipate this shard will be, e.g. for a shard // that's being relocated/replicated we know how large it will become once it's done copying: // Count up how many shards are currently on each data path: - Map dataPathToShardCount = new HashMap<>(); - for(IndexShard shard : this) { + Map dataPathToShardCount = new HashMap<>(); + for (IndexShard shard : this) { Path dataPath = shard.shardPath().getRootStatePath(); Integer curCount = dataPathToShardCount.get(dataPath); if (curCount == null) { curCount = 0; } - dataPathToShardCount.put(dataPath, curCount+1); + dataPathToShardCount.put(dataPath, curCount + 1); } path = ShardPath.selectNewPathForShard(nodeEnv, shardId, this.indexSettings, routing.getExpectedShardSize() == ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE ? getAvgShardSizeInBytes() : routing.getExpectedShardSize(), - dataPathToShardCount); + dataPathToShardCount); logger.debug("{} creating using a new path [{}]", shardId, path); } else { logger.debug("{} creating using an existing path [{}]", shardId, path); @@ -277,7 +285,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC logger.debug("creating shard_id {}", shardId); // if we are on a shared FS we only own the shard (ie. we can safely delete it) if we are the primary. final boolean canDeleteShardContent = IndexMetaData.isOnSharedFilesystem(indexSettings) == false || - (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); + (primary && IndexMetaData.isOnSharedFilesystem(indexSettings)); store = new Store(shardId, this.indexSettings, indexStore.newDirectoryService(path), lock, new StoreCloseListener(shardId, canDeleteShardContent, () -> nodeServicesProvider.getIndicesQueryCache().onClose(shardId))); if (useShadowEngine(primary, indexSettings)) { indexShard = new ShadowIndexShard(shardId, this.indexSettings, path, store, indexCache, mapperService, similarityService, indexFieldData, engineFactory, eventListener, searcherWrapper, nodeServicesProvider); @@ -462,6 +470,7 @@ public final class IndexService extends AbstractIndexComponent implements IndexC } } } + /** * Returns the filter associated with listed filtering aliases. *

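
Worth calling out in the `IndexService#createShard` hunk above: before picking a location for a new shard, the node tallies how many shards already live on each data path and hands the counts to `ShardPath.selectNewPathForShard`. The null-check-then-put loop in the diff predates heavy `Map.merge` use; a compact stand-alone version of the counting step (the paths are made up):

```java
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DataPathCounts {
    public static void main(String[] args) {
        // Stand-ins for shard.shardPath().getRootStatePath() results.
        List<Path> shardRoots = Arrays.asList(
                Paths.get("/data/a"), Paths.get("/data/b"), Paths.get("/data/a"));

        Map<Path, Integer> dataPathToShardCount = new HashMap<>();
        for (Path dataPath : shardRoots) {
            // Same effect as the diff's curCount == null ? 0 : curCount, then + 1.
            dataPathToShardCount.merge(dataPath, 1, Integer::sum);
        }
        System.out.println(dataPathToShardCount); // e.g. {/data/a=2, /data/b=1}
    }
}
```
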
diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 1404b61b8ec..de13eb10977 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -781,10 +781,14 @@ public class InternalEngine extends Engine { // we need to fail the engine. it might have already been failed before // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - failEngine("already closed by tragic event", indexWriter.getTragicException()); + failEngine("already closed by tragic event on the index writer", indexWriter.getTragicException()); + } else if (translog.isOpen() == false && translog.getTragicException() != null) { + failEngine("already closed by tragic event on the translog", translog.getTragicException()); } return true; - } else if (t != null && indexWriter.isOpen() == false && indexWriter.getTragicException() == t) { + } else if (t != null && + ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t) + || (translog.isOpen() == false && translog.getTragicException() == t))) { // this spot on - we are handling the tragic event exception here so we have to fail the engine // right away failEngine(source, t); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java index 47c43720162..54c6ef20e3e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ContentPath.java @@ -19,16 +19,9 @@ package org.elasticsearch.index.mapper; -public class ContentPath { +public final class ContentPath { - public enum Type { - JUST_NAME, - FULL, - } - - private Type pathType; - - private final char delimiter; + private static final char DELIMITER = '.'; private final StringBuilder sb; @@ -47,7 +40,6 @@ public class ContentPath { * number of path elements to not be included in {@link #pathAsText(String)}. 
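
The `InternalEngine` change above widens `maybeFailEngine` in two ways: a tragic event on the translog now fails the engine just like one on the `IndexWriter`, and an in-flight exception only counts as tragic when it is the very object a closed component recorded (`==`, not `equals`). The shape of that decision logic, with an invented `Component` stand-in for `IndexWriter`/`Translog`:

```java
final class TragicEventCheck {
    // Invented stand-in for IndexWriter / Translog: liveness plus a recorded tragedy.
    interface Component {
        boolean isOpen();
        Throwable getTragicException();
    }

    /**
     * Mirrors the patched control flow: when the engine is already failed, double-check
     * which component died tragically; otherwise only fail for the exact Throwable a
     * closed component recorded (identity comparison, not equality).
     */
    static boolean maybeFail(boolean engineAlreadyFailed, Component indexWriter,
                             Component translog, Throwable t) {
        if (engineAlreadyFailed) {
            if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) {
                fail("already closed by tragic event on the index writer");
            } else if (translog.isOpen() == false && translog.getTragicException() != null) {
                fail("already closed by tragic event on the translog");
            }
            return true;
        } else if (t != null
                && ((indexWriter.isOpen() == false && indexWriter.getTragicException() == t)
                    || (translog.isOpen() == false && translog.getTragicException() == t))) {
            fail("tragic event: " + t);
            return true;
        }
        return false;
    }

    private static void fail(String reason) {
        System.out.println("failing engine: " + reason); // placeholder for failEngine(...)
    }
}
```
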
*/ public ContentPath(int offset) { - this.delimiter = '.'; this.sb = new StringBuilder(); this.offset = offset; reset(); @@ -71,26 +63,11 @@ public class ContentPath { } public String pathAsText(String name) { - if (pathType == Type.JUST_NAME) { - return name; - } - return fullPathAsText(name); - } - - public String fullPathAsText(String name) { sb.setLength(0); for (int i = offset; i < index; i++) { - sb.append(path[i]).append(delimiter); + sb.append(path[i]).append(DELIMITER); } sb.append(name); return sb.toString(); } - - public Type pathType() { - return pathType; - } - - public void pathType(Type type) { - this.pathType = type; - } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index c4fec8cf095..333cda459f7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,6 +52,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -113,11 +114,11 @@ public class DocumentMapper implements ToXContent { private final MapperService mapperService; private final String type; - private final StringAndBytesText typeText; + private final Text typeText; private volatile CompressedXContent mappingSource; - private final Mapping mapping; + private volatile Mapping mapping; private final DocumentParser documentParser; @@ -137,7 +138,7 @@ public class DocumentMapper implements ToXContent { ReentrantReadWriteLock mappingLock) { this.mapperService = mapperService; this.type = rootObjectMapper.name(); - this.typeText = new StringAndBytesText(this.type); + this.typeText = new Text(this.type); this.mapping = new Mapping( Version.indexCreated(indexSettings), rootObjectMapper, @@ -352,16 +353,19 @@ public class DocumentMapper implements ToXContent { mapperService.addMappers(type, objectMappers, fieldMappers); } - public MergeResult merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { + public void merge(Mapping mapping, boolean simulate, boolean updateAllTypes) { try (ReleasableLock lock = mappingWriteLock.acquire()) { mapperService.checkMappersCompatibility(type, mapping, updateAllTypes); - final MergeResult mergeResult = new MergeResult(simulate, updateAllTypes); - this.mapping.merge(mapping, mergeResult); + // do the merge even if simulate == false so that we get exceptions + Mapping merged = this.mapping.merge(mapping, updateAllTypes); if (simulate == false) { - addMappers(mergeResult.getNewObjectMappers(), mergeResult.getNewFieldMappers(), updateAllTypes); + this.mapping = merged; + Collection objectMappers = new ArrayList<>(); + Collection fieldMappers = new ArrayList<>(Arrays.asList(merged.metadataMappers)); + MapperUtils.collect(merged.root, objectMappers, fieldMappers); + addMappers(objectMappers, fieldMappers, 
updateAllTypes); refreshSource(); } - return mergeResult; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index b0ad972d575..bb1749d2336 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -234,9 +234,6 @@ class DocumentParser implements Closeable { nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); } - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(mapper.pathType()); - // if we are at the end of the previous object, advance if (token == XContentParser.Token.END_OBJECT) { token = parser.nextToken(); @@ -267,12 +264,11 @@ class DocumentParser implements Closeable { if (update == null) { update = newUpdate; } else { - MapperUtils.merge(update, newUpdate); + update = update.merge(newUpdate, false); } } } // restore the enable path flag - context.path().pathType(origPathType); if (nested.isNested()) { ParseContext.Document nestedDoc = context.doc(); ParseContext.Document parentDoc = nestedDoc.getParent(); @@ -341,7 +337,7 @@ class DocumentParser implements Closeable { context.path().remove(); Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); + builder = MapperBuilders.object(currentFieldName).enabled(true); // if this is a non root object, then explicitly set the dynamic behavior if set if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); @@ -610,7 +606,7 @@ class DocumentParser implements Closeable { return null; } final Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); - final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().fullPathAsText(currentFieldName)); + final MappedFieldType existingFieldType = context.mapperService().fullName(context.path().pathAsText(currentFieldName)); Mapper.Builder builder = null; if (existingFieldType != null) { // create a builder of the same type @@ -695,7 +691,7 @@ class DocumentParser implements Closeable { if (paths.length > 1) { ObjectMapper parent = context.root(); for (int i = 0; i < paths.length-1; i++) { - mapper = context.docMapper().objectMappers().get(context.path().fullPathAsText(paths[i])); + mapper = context.docMapper().objectMappers().get(context.path().pathAsText(paths[i])); if (mapper == null) { // One mapping is missing, check if we are allowed to create a dynamic one. 
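
With the per-mapper `path` option gone, `ContentPath` (two hunks up) always joins segments with a literal `'.'`, which is why `DocumentParser` above can swap every `fullPathAsText` call for the now-unambiguous `pathAsText`. A compilable miniature of the simplified class — fixed-size segment array, `offset` handling and array growth omitted for brevity:

```java
// Minimal re-creation of the simplified ContentPath: fixed '.' delimiter, no path types.
final class MiniContentPath {
    private static final char DELIMITER = '.';
    private final StringBuilder sb = new StringBuilder();
    private final String[] path = new String[16]; // real class grows this on demand
    private int index;

    void add(String name) { path[index++] = name; }
    void remove() { path[--index] = null; }

    String pathAsText(String name) {
        sb.setLength(0);
        for (int i = 0; i < index; i++) {
            sb.append(path[i]).append(DELIMITER);
        }
        sb.append(name);
        return sb.toString();
    }

    public static void main(String[] args) {
        MiniContentPath path = new MiniContentPath();
        path.add("user");
        path.add("address");
        System.out.println(path.pathAsText("zip")); // user.address.zip
    }
}
```
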
ObjectMapper.Dynamic dynamic = parent.dynamic(); @@ -713,12 +709,12 @@ class DocumentParser implements Closeable { if (!(parent instanceof RootObjectMapper) && parent.dynamic() != ObjectMapper.Defaults.DYNAMIC) { ((ObjectMapper.Builder) builder).dynamic(parent.dynamic()); } - builder = MapperBuilders.object(paths[i]).enabled(true).pathType(parent.pathType()); + builder = MapperBuilders.object(paths[i]).enabled(true); } Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); mapper = (ObjectMapper) builder.build(builderContext); if (mapper.nested() != ObjectMapper.Nested.NO) { - throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().fullPathAsText(paths[i]) + "]) through `copy_to`"); + throw new MapperParsingException("It is forbidden to create dynamic nested objects ([" + context.path().pathAsText(paths[i]) + "]) through `copy_to`"); } break; case FALSE: @@ -759,7 +755,7 @@ class DocumentParser implements Closeable { private static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { final Mapper update = parseObjectOrField(context, mapper); if (update != null) { - MapperUtils.merge(mapper, update); + mapper = (M) mapper.merge(update, false); } return mapper; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java index ced3f08b229..30df3562aec 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java @@ -47,7 +47,7 @@ import java.util.List; import java.util.Locale; import java.util.stream.StreamSupport; -public abstract class FieldMapper extends Mapper { +public abstract class FieldMapper extends Mapper implements Cloneable { public abstract static class Builder extends Mapper.Builder { @@ -64,10 +64,10 @@ public abstract class FieldMapper extends Mapper { protected final MultiFields.Builder multiFieldsBuilder; protected CopyTo copyTo; - protected Builder(String name, MappedFieldType fieldType) { + protected Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { super(name); this.fieldType = fieldType.clone(); - this.defaultFieldType = fieldType.clone(); + this.defaultFieldType = defaultFieldType.clone(); this.defaultOptions = fieldType.indexOptions(); // we have to store it the fieldType is mutable multiFieldsBuilder = new MultiFields.Builder(); } @@ -84,8 +84,13 @@ public abstract class FieldMapper extends Mapper { * if the fieldType has a non-null option we are all good it might have been set through a different * call. 
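
Two related changes sit at this point in `FieldMapper`: the `Builder` above now receives the default field type explicitly instead of cloning it from the configured one, and, in the part of the hunk that continues just below, the old `assert options != IndexOptions.NONE` becomes a thrown `IllegalArgumentException` — the conflict is reachable from user input (a sibling type on the same index that disabled indexing), and assertions are off in production JVMs. A hedged sketch of that check, assuming Lucene's `IndexOptions` on the classpath:

```java
import org.apache.lucene.index.IndexOptions;

final class IndexOptionCheck {
    // A user-reachable conflict must throw, not assert; message matches the diff below.
    static IndexOptions enableIndexing(String name, IndexOptions defaultOptions) {
        if (defaultOptions == IndexOptions.NONE) {
            // happens when another type on the same index already disabled indexing,
            // since the default field type is inherited from the first mapper created
            throw new IllegalArgumentException("mapper [" + name
                    + "] has different [index] values from other types of the same index");
        }
        return defaultOptions;
    }
}
```
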
*/ - final IndexOptions options = getDefaultIndexOption(); - assert options != IndexOptions.NONE : "default IndexOptions is NONE can't enable indexing"; + IndexOptions options = getDefaultIndexOption(); + if (options == IndexOptions.NONE) { + // can happen when an existing type on the same index has disabled indexing + // since we inherit the default field type from the first mapper that is + // created on an index + throw new IllegalArgumentException("mapper [" + name + "] has different [index] values from other types of the same index"); + } fieldType.setIndexOptions(options); } } else { @@ -202,11 +207,6 @@ public abstract class FieldMapper extends Mapper { return this; } - public T multiFieldPathType(ContentPath.Type pathType) { - multiFieldsBuilder.pathType(pathType); - return builder; - } - public T addMultiField(Mapper.Builder mapperBuilder) { multiFieldsBuilder.add(mapperBuilder); return builder; @@ -237,7 +237,7 @@ public abstract class FieldMapper extends Mapper { } protected String buildFullName(BuilderContext context) { - return context.path().fullPathAsText(name); + return context.path().pathAsText(name); } protected void setupFieldType(BuilderContext context) { @@ -270,7 +270,7 @@ public abstract class FieldMapper extends Mapper { protected MappedFieldTypeReference fieldTypeRef; protected final MappedFieldType defaultFieldType; - protected final MultiFields multiFields; + protected MultiFields multiFields; protected CopyTo copyTo; protected final boolean indexCreatedBefore2x; @@ -359,26 +359,41 @@ public abstract class FieldMapper extends Mapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected FieldMapper clone() { + try { + return (FieldMapper) super.clone(); + } catch (CloneNotSupportedException e) { + throw new AssertionError(e); + } + } + + @Override + public FieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + FieldMapper merged = clone(); + merged.doMerge(mergeWith, updateAllTypes); + return merged; + } + + /** + * Merge changes coming from {@code mergeWith} in place. 
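
This is the heart of the refactor: `merge` no longer mutates the live mapper while collecting conflicts in a `MergeResult`. Instead it clones itself, applies `doMerge` to the clone, and returns it, so conflicts surface as exceptions and concurrent readers never observe a half-merged mapper (`DocumentMapper` then publishes the result through its now-`volatile` `mapping` field). A self-contained sketch of the pattern:

```java
// Copy-on-write merge: merge() clones, doMerge() mutates only the clone,
// and incompatibilities are reported by throwing rather than via a MergeResult.
abstract class MiniMapper implements Cloneable {
    final String name;
    MiniMapper(String name) { this.name = name; }

    @Override
    protected MiniMapper clone() {
        try {
            return (MiniMapper) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // Cloneable is implemented, cannot happen
        }
    }

    final MiniMapper merge(MiniMapper mergeWith) {
        MiniMapper merged = clone();
        merged.doMerge(mergeWith);
        return merged; // the original is left untouched
    }

    protected void doMerge(MiniMapper mergeWith) {
        if (!getClass().equals(mergeWith.getClass())) {
            throw new IllegalArgumentException("mapper [" + name + "] of different type");
        }
    }
}

final class MiniStringMapper extends MiniMapper {
    Integer ignoreAbove;
    MiniStringMapper(String name, Integer ignoreAbove) { super(name); this.ignoreAbove = ignoreAbove; }

    @Override
    protected void doMerge(MiniMapper mergeWith) {
        super.doMerge(mergeWith);
        this.ignoreAbove = ((MiniStringMapper) mergeWith).ignoreAbove; // applied to the clone only
    }
}
```
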
+ * @param updateAllTypes TODO + */ + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof FieldMapper) { mergedType = ((FieldMapper) mergeWith).contentType(); } - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); - // different types, return - return; + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); } FieldMapper fieldMergeWith = (FieldMapper) mergeWith; - multiFields.merge(mergeWith, mergeResult); + multiFields = multiFields.merge(fieldMergeWith.multiFields); - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - // apply changeable values - MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); - fieldType.freeze(); - fieldTypeRef.set(fieldType); - this.copyTo = fieldMergeWith.copyTo; - } + // apply changeable values + MappedFieldType fieldType = fieldMergeWith.fieldType().clone(); + fieldType.freeze(); + fieldTypeRef.set(fieldType); + this.copyTo = fieldMergeWith.copyTo; } @Override @@ -520,18 +535,12 @@ public abstract class FieldMapper extends Mapper { public static class MultiFields { public static MultiFields empty() { - return new MultiFields(ContentPath.Type.FULL, ImmutableOpenMap.of()); + return new MultiFields(ImmutableOpenMap.of()); } public static class Builder { private final ImmutableOpenMap.Builder mapperBuilders = ImmutableOpenMap.builder(); - private ContentPath.Type pathType = ContentPath.Type.FULL; - - public Builder pathType(ContentPath.Type pathType) { - this.pathType = pathType; - return this; - } public Builder add(Mapper.Builder builder) { mapperBuilders.put(builder.name(), builder); @@ -540,13 +549,9 @@ public abstract class FieldMapper extends Mapper { @SuppressWarnings("unchecked") public MultiFields build(FieldMapper.Builder mainFieldBuilder, BuilderContext context) { - if (pathType == ContentPath.Type.FULL && mapperBuilders.isEmpty()) { + if (mapperBuilders.isEmpty()) { return empty(); - } else if (mapperBuilders.isEmpty()) { - return new MultiFields(pathType, ImmutableOpenMap.of()); } else { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(mainFieldBuilder.name()); ImmutableOpenMap.Builder mapperBuilders = this.mapperBuilders; for (ObjectObjectCursor cursor : this.mapperBuilders) { @@ -557,26 +562,25 @@ public abstract class FieldMapper extends Mapper { mapperBuilders.put(key, mapper); } context.path().remove(); - context.path().pathType(origPathType); ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); - return new MultiFields(pathType, mappers.build()); + return new MultiFields(mappers.build()); } } } - private final ContentPath.Type pathType; - private volatile ImmutableOpenMap mappers; + private final ImmutableOpenMap mappers; - public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { - this.pathType = pathType; - this.mappers = mappers; + private MultiFields(ImmutableOpenMap mappers) { + ImmutableOpenMap.Builder builder = new ImmutableOpenMap.Builder<>(); // we disable the all in multi-field mappers - for (ObjectCursor cursor : mappers.values()) { + for (ObjectObjectCursor cursor : mappers) { FieldMapper mapper = cursor.value; if (mapper 
instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); + mapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); } + builder.put(cursor.key, mapper); } + this.mappers = builder.build(); } public void parse(FieldMapper mainField, ParseContext context) throws IOException { @@ -587,58 +591,33 @@ public abstract class FieldMapper extends Mapper { context = context.createMultiFieldContext(); - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - context.path().add(mainField.simpleName()); for (ObjectCursor cursor : mappers.values()) { cursor.value.parse(context); } context.path().remove(); - context.path().pathType(origPathType); } - // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge - public void merge(Mapper mergeWith, MergeResult mergeResult) { - FieldMapper mergeWithMultiField = (FieldMapper) mergeWith; + public MultiFields merge(MultiFields mergeWith) { + ImmutableOpenMap.Builder newMappersBuilder = ImmutableOpenMap.builder(mappers); - List newFieldMappers = null; - ImmutableOpenMap.Builder newMappersBuilder = null; - - for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { + for (ObjectCursor cursor : mergeWith.mappers.values()) { FieldMapper mergeWithMapper = cursor.value; - Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); + FieldMapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - // we disable the all in multi-field mappers - if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); - } - if (newMappersBuilder == null) { - newMappersBuilder = ImmutableOpenMap.builder(mappers); - } - newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); - if (mergeWithMapper instanceof FieldMapper) { - if (newFieldMappers == null) { - newFieldMappers = new ArrayList<>(2); - } - newFieldMappers.add(mergeWithMapper); - } + // we disable the all in multi-field mappers + if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { + mergeWithMapper = (FieldMapper) ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); } + newMappersBuilder.put(mergeWithMapper.simpleName(), mergeWithMapper); } else { - mergeIntoMapper.merge(mergeWithMapper, mergeResult); + FieldMapper merged = mergeIntoMapper.merge(mergeWithMapper, false); + newMappersBuilder.put(merged.simpleName(), merged); // override previous definition } } - // first add all field mappers - if (newFieldMappers != null) { - mergeResult.addFieldMappers(newFieldMappers); - } - // now publish mappers - if (newMappersBuilder != null) { - mappers = newMappersBuilder.build(); - } + ImmutableOpenMap mappers = newMappersBuilder.build(); + return new MultiFields(mappers); } public Iterator iterator() { @@ -646,9 +625,6 @@ public abstract class FieldMapper extends Mapper { } public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (pathType != ContentPath.Type.FULL) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (!mappers.isEmpty()) { // sort the mappers so we get consistent serialization format Mapper[] sortedMappers = mappers.values().toArray(Mapper.class); diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java 
b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java index 33a4dabd3be..4c3aa3c56bb 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapper.java @@ -174,5 +174,7 @@ public abstract class Mapper implements ToXContent, Iterable { /** Returns the canonical name which uniquely identifies the mapper against other mappers in a type. */ public abstract String name(); - public abstract void merge(Mapper mergeWith, MergeResult mergeResult); + /** Return the merge of {@code mergeWith} into this. + * Both {@code this} and {@code mergeWith} will be left unmodified. */ + public abstract Mapper merge(Mapper mergeWith, boolean updateAllTypes); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java index 938f610d6db..37e99e8c90c 100755 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -32,7 +32,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.search.Queries; @@ -92,7 +91,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { private final ReleasableLock mappingWriteLock = new ReleasableLock(mappingLock.writeLock()); private volatile FieldTypeLookup fieldTypes; - private volatile ImmutableOpenMap fullPathObjectMappers = ImmutableOpenMap.of(); + private volatile Map fullPathObjectMappers = new HashMap<>(); private boolean hasNested = false; // updated dynamically to true when a nested object is added private final DocumentMapperParser documentParser; @@ -199,6 +198,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { public DocumentMapper merge(String type, CompressedXContent mappingSource, boolean applyDefault, boolean updateAllTypes) { if (DEFAULT_MAPPING.equals(type)) { // verify we can parse it + // NOTE: never apply the default here DocumentMapper mapper = documentParser.parseCompressed(type, mappingSource); // still add it as a document mapper so we have it registered and, for example, persisted back into // the cluster meta data if needed, or checked for existence @@ -212,75 +212,70 @@ public class MapperService extends AbstractIndexComponent implements Closeable { } return mapper; } else { - return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + try (ReleasableLock lock = mappingWriteLock.acquire()) { + // only apply the default mapping if we don't have the type yet + applyDefault &= mappers.containsKey(type) == false; + return merge(parse(type, mappingSource, applyDefault), updateAllTypes); + } } } // never expose this to the outside world, we need to reparse the doc mapper so we get fresh // instances of field mappers to properly remove existing doc mapper private DocumentMapper merge(DocumentMapper mapper, boolean updateAllTypes) { - try (ReleasableLock lock = mappingWriteLock.acquire()) { - if (mapper.type().length() == 0) { - throw new InvalidTypeNameException("mapping type name is empty"); - } - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && 
mapper.type().length() > 255) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); - } - if (mapper.type().charAt(0) == '_') { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); - } - if (mapper.type().contains("#")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); - } - if (mapper.type().contains(",")) { - throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); - } - if (mapper.type().equals(mapper.parentFieldMapper().type())) { - throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); - } - if (typeNameStartsWithIllegalDot(mapper)) { - if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { - throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); - } else { - logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); - } - } - // we can add new field/object mappers while the old ones are there - // since we get new instances of those, and when we remove, we remove - // by instance equality - DocumentMapper oldMapper = mappers.get(mapper.type()); - - if (oldMapper != null) { - // simulate first - MergeResult result = oldMapper.merge(mapper.mapping(), true, updateAllTypes); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Merge failed with failures {" + Arrays.toString(result.buildConflicts()) + "}"); - } - // then apply for real - result = oldMapper.merge(mapper.mapping(), false, updateAllTypes); - assert result.hasConflicts() == false; // we already simulated - return oldMapper; + if (mapper.type().length() == 0) { + throw new InvalidTypeNameException("mapping type name is empty"); + } + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1) && mapper.type().length() > 255) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]"); + } + if (mapper.type().charAt(0) == '_') { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'"); + } + if (mapper.type().contains("#")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it"); + } + if (mapper.type().contains(",")) { + throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it"); + } + if (mapper.type().equals(mapper.parentFieldMapper().type())) { + throw new IllegalArgumentException("The [_parent.type] option can't point to the same type"); + } + if (typeNameStartsWithIllegalDot(mapper)) { + if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_2_0_0_beta1)) { + throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'"); } else { - Tuple, Collection> newMappers = checkMappersCompatibility( - mapper.type(), mapper.mapping(), updateAllTypes); - Collection newObjectMappers = newMappers.v1(); - Collection newFieldMappers = newMappers.v2(); - addMappers(mapper.type(), newObjectMappers, newFieldMappers); - - for (DocumentTypeListener typeListener : typeListeners) { - typeListener.beforeCreate(mapper); - } - mappers = newMapBuilder(mappers).put(mapper.type(), 
mapper).map(); - if (mapper.parentFieldMapper().active()) { - Set newParentTypes = new HashSet<>(parentTypes.size() + 1); - newParentTypes.addAll(parentTypes); - newParentTypes.add(mapper.parentFieldMapper().type()); - parentTypes = unmodifiableSet(newParentTypes); - } - assert assertSerialization(mapper); - return mapper; + logger.warn("Type [{}] starts with a '.', it is recommended not to start a type name with a '.'", mapper.type()); } } + // we can add new field/object mappers while the old ones are there + // since we get new instances of those, and when we remove, we remove + // by instance equality + DocumentMapper oldMapper = mappers.get(mapper.type()); + + if (oldMapper != null) { + oldMapper.merge(mapper.mapping(), false, updateAllTypes); + return oldMapper; + } else { + Tuple, Collection> newMappers = checkMappersCompatibility( + mapper.type(), mapper.mapping(), updateAllTypes); + Collection newObjectMappers = newMappers.v1(); + Collection newFieldMappers = newMappers.v2(); + addMappers(mapper.type(), newObjectMappers, newFieldMappers); + + for (DocumentTypeListener typeListener : typeListeners) { + typeListener.beforeCreate(mapper); + } + mappers = newMapBuilder(mappers).put(mapper.type(), mapper).map(); + if (mapper.parentFieldMapper().active()) { + Set newParentTypes = new HashSet<>(parentTypes.size() + 1); + newParentTypes.addAll(parentTypes); + newParentTypes.add(mapper.parentFieldMapper().type()); + parentTypes = unmodifiableSet(newParentTypes); + } + assert assertSerialization(mapper); + return mapper; + } } private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) { @@ -300,19 +295,56 @@ public class MapperService extends AbstractIndexComponent implements Closeable { return true; } + private void checkFieldUniqueness(String type, Collection objectMappers, Collection fieldMappers) { + final Set objectFullNames = new HashSet<>(); + for (ObjectMapper objectMapper : objectMappers) { + final String fullPath = objectMapper.fullPath(); + if (objectFullNames.add(fullPath) == false) { + throw new IllegalArgumentException("Object mapper [" + fullPath + "] is defined twice in mapping for type [" + type + "]"); + } + } + + if (indexSettings.getIndexVersionCreated().before(Version.V_3_0_0)) { + // Before 3.0 some metadata mappers are also registered under the root object mapper + // So we avoid false positives by deduplicating mappers + // given that we check exact equality, this would still catch the case that a mapper + // is defined under the root object + Collection uniqueFieldMappers = Collections.newSetFromMap(new IdentityHashMap<>()); + uniqueFieldMappers.addAll(fieldMappers); + fieldMappers = uniqueFieldMappers; + } + + final Set fieldNames = new HashSet<>(); + for (FieldMapper fieldMapper : fieldMappers) { + final String name = fieldMapper.name(); + if (objectFullNames.contains(name)) { + throw new IllegalArgumentException("Field [" + name + "] is defined both as an object and a field in [" + type + "]"); + } else if (fieldNames.add(name) == false) { + throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]"); + } + } + } + protected void checkMappersCompatibility(String type, Collection objectMappers, Collection fieldMappers, boolean updateAllTypes) { assert mappingLock.isWriteLockedByCurrentThread(); + + checkFieldUniqueness(type, objectMappers, fieldMappers); + for (ObjectMapper newObjectMapper : objectMappers) { ObjectMapper existingObjectMapper = fullPathObjectMappers.get(newObjectMapper.fullPath()); if (existingObjectMapper 
!= null) { - MergeResult result = new MergeResult(true, updateAllTypes); - existingObjectMapper.merge(newObjectMapper, result); - if (result.hasConflicts()) { - throw new IllegalArgumentException("Mapper for [" + newObjectMapper.fullPath() + "] conflicts with existing mapping in other types" + - Arrays.toString(result.buildConflicts())); - } + // simulate a merge and ignore the result, we are just interested + // in exceptions here + existingObjectMapper.merge(newObjectMapper, updateAllTypes); } } + + for (FieldMapper fieldMapper : fieldMappers) { + if (fullPathObjectMappers.containsKey(fieldMapper.name())) { + throw new IllegalArgumentException("Field [" + fieldMapper.name() + "] is defined as a field in mapping [" + type + "] but this name is already used for an object in other types"); + } + } + fieldTypes.checkCompatibility(type, fieldMappers, updateAllTypes); } @@ -320,9 +352,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable { String type, Mapping mapping, boolean updateAllTypes) { List objectMappers = new ArrayList<>(); List fieldMappers = new ArrayList<>(); - for (MetadataFieldMapper metadataMapper : mapping.metadataMappers) { - fieldMappers.add(metadataMapper); - } + Collections.addAll(fieldMappers, mapping.metadataMappers); MapperUtils.collect(mapping.root, objectMappers, fieldMappers); checkMappersCompatibility(type, objectMappers, fieldMappers, updateAllTypes); return new Tuple<>(objectMappers, fieldMappers); @@ -330,14 +360,14 @@ public class MapperService extends AbstractIndexComponent implements Closeable { protected void addMappers(String type, Collection objectMappers, Collection fieldMappers) { assert mappingLock.isWriteLockedByCurrentThread(); - ImmutableOpenMap.Builder fullPathObjectMappers = ImmutableOpenMap.builder(this.fullPathObjectMappers); + Map fullPathObjectMappers = new HashMap<>(this.fullPathObjectMappers); for (ObjectMapper objectMapper : objectMappers) { fullPathObjectMappers.put(objectMapper.fullPath(), objectMapper); if (objectMapper.nested().isNested()) { hasNested = true; } } - this.fullPathObjectMappers = fullPathObjectMappers.build(); + this.fullPathObjectMappers = Collections.unmodifiableMap(fullPathObjectMappers); this.fieldTypes = this.fieldTypes.copyAndAddAll(type, fieldMappers); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index d46c32a932b..04508827f77 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -27,52 +27,6 @@ import java.util.Collection; public enum MapperUtils { ; - private static MergeResult newStrictMergeResult() { - return new MergeResult(false, false) { - - @Override - public void addFieldMappers(Collection fieldMappers) { - // no-op - } - - @Override - public void addObjectMappers(Collection objectMappers) { - // no-op - } - - @Override - public Collection getNewFieldMappers() { - throw new UnsupportedOperationException("Strict merge result does not support new field mappers"); - } - - @Override - public Collection getNewObjectMappers() { - throw new UnsupportedOperationException("Strict merge result does not support new object mappers"); - } - - @Override - public void addConflict(String mergeFailure) { - throw new MapperParsingException("Merging dynamic updates triggered a conflict: " + mergeFailure); - } - }; - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. 
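
In the `MapperService` hunks above, the write lock moves up to the public `merge` entry point (so the default mapping is applied only when the type does not exist yet), and the new `checkFieldUniqueness` leans on `Set.add` returning `false` to flag duplicates; the pre-3.0 branch additionally dedupes by object identity so metadata mappers legitimately registered twice under the root do not trip it. A reduced sketch over plain names, with the identity dedup omitted:

```java
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

final class UniquenessCheck {
    static void checkFieldUniqueness(String type, Collection<String> objectPaths,
                                     Collection<String> fieldNames) {
        Set<String> objectFullNames = new HashSet<>();
        for (String fullPath : objectPaths) {
            if (objectFullNames.add(fullPath) == false) { // add() is false on duplicates
                throw new IllegalArgumentException("Object mapper [" + fullPath
                        + "] is defined twice in mapping for type [" + type + "]");
            }
        }
        Set<String> seen = new HashSet<>();
        for (String name : fieldNames) {
            if (objectFullNames.contains(name)) {
                throw new IllegalArgumentException("Field [" + name
                        + "] is defined both as an object and a field in [" + type + "]");
            } else if (seen.add(name) == false) {
                throw new IllegalArgumentException("Field [" + name + "] is defined twice in [" + type + "]");
            }
        }
    }
}
```
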
Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. - */ - public static void merge(Mapper mergeInto, Mapper mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - - /** - * Merge {@code mergeWith} into {@code mergeTo}. Note: this method only - * merges mappings, not lookup structures. Conflicts are returned as exceptions. - */ - public static void merge(Mapping mergeInto, Mapping mergeWith) { - mergeInto.merge(mergeWith, newStrictMergeResult()); - } - /** Split mapper and its descendants into object and field mappers. */ public static void collect(Mapper mapper, Collection objectMappers, Collection fieldMappers) { if (mapper instanceof RootObjectMapper) { diff --git a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java index bac42162552..d33a97a4151 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -27,10 +27,12 @@ import org.elasticsearch.index.mapper.object.RootObjectMapper; import java.io.IOException; import java.util.Arrays; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; -import java.util.List; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; @@ -41,25 +43,27 @@ import static java.util.Collections.unmodifiableMap; */ public final class Mapping implements ToXContent { - public static final List LEGACY_INCLUDE_IN_OBJECT = Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"); + // Set of fields that were included into the root object mapper before 2.0 + public static final Set LEGACY_INCLUDE_IN_OBJECT = Collections.unmodifiableSet(new HashSet<>( + Arrays.asList("_all", "_id", "_parent", "_routing", "_timestamp", "_ttl"))); final Version indexCreated; final RootObjectMapper root; final MetadataFieldMapper[] metadataMappers; final Map, MetadataFieldMapper> metadataMappersMap; - volatile Map meta; + final Map meta; public Mapping(Version indexCreated, RootObjectMapper rootObjectMapper, MetadataFieldMapper[] metadataMappers, Map meta) { this.indexCreated = indexCreated; - this.root = rootObjectMapper; this.metadataMappers = metadataMappers; Map, MetadataFieldMapper> metadataMappersMap = new HashMap<>(); for (MetadataFieldMapper metadataMapper : metadataMappers) { if (indexCreated.before(Version.V_2_0_0_beta1) && LEGACY_INCLUDE_IN_OBJECT.contains(metadataMapper.name())) { - root.putMapper(metadataMapper); + rootObjectMapper = rootObjectMapper.copyAndPutMapper(metadataMapper); } metadataMappersMap.put(metadataMapper.getClass(), metadataMapper); } + this.root = rootObjectMapper; // keep root mappers sorted for consistent serialization Arrays.sort(metadataMappers, new Comparator() { @Override @@ -90,21 +94,20 @@ public final class Mapping implements ToXContent { } /** @see DocumentMapper#merge(Mapping, boolean, boolean) */ - public void merge(Mapping mergeWith, MergeResult mergeResult) { - assert metadataMappers.length == mergeWith.metadataMappers.length; - - root.merge(mergeWith.root, mergeResult); - for (MetadataFieldMapper metadataMapper : metadataMappers) { - MetadataFieldMapper mergeWithMetadataMapper = mergeWith.metadataMapper(metadataMapper.getClass()); - if (mergeWithMetadataMapper != null) { - metadataMapper.merge(mergeWithMetadataMapper, mergeResult); + public 
Mapping merge(Mapping mergeWith, boolean updateAllTypes) { + RootObjectMapper mergedRoot = root.merge(mergeWith.root, updateAllTypes); + Map, MetadataFieldMapper> mergedMetaDataMappers = new HashMap<>(metadataMappersMap); + for (MetadataFieldMapper metaMergeWith : mergeWith.metadataMappers) { + MetadataFieldMapper mergeInto = mergedMetaDataMappers.get(metaMergeWith.getClass()); + MetadataFieldMapper merged; + if (mergeInto == null) { + merged = metaMergeWith; + } else { + merged = mergeInto.merge(metaMergeWith, updateAllTypes); } + mergedMetaDataMappers.put(merged.getClass(), merged); } - - if (mergeResult.simulate() == false) { - // let the merge with attributes to override the attributes - meta = mergeWith.meta; - } + return new Mapping(indexCreated, mergedRoot, mergedMetaDataMappers.values().toArray(new MetadataFieldMapper[0]), mergeWith.meta); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java b/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java deleted file mode 100644 index f5698a0ed18..00000000000 --- a/core/src/main/java/org/elasticsearch/index/mapper/MergeResult.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; - -/** A container for tracking results of a mapping merge. 
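
`Mapping.merge` above now builds a completely new `Mapping`: it merges the two root mappers, then folds the incoming metadata mappers into a map keyed by mapper class — merging on collision, inserting otherwise — and takes `meta` from `mergeWith`. That fold, reduced to its generic shape:

```java
import java.util.HashMap;
import java.util.Map;
import java.util.function.BinaryOperator;
import java.util.function.Function;

final class MergeByKey {
    /** Fold incoming values into a copy of base: merge on collision, insert otherwise. */
    static <K, V> Map<K, V> merge(Map<K, V> base, Iterable<V> incoming,
                                  Function<V, K> keyOf, BinaryOperator<V> combiner) {
        Map<K, V> merged = new HashMap<>(base); // the original map is never mutated
        for (V value : incoming) {
            K key = keyOf.apply(value);
            V existing = merged.get(key);
            merged.put(key, existing == null ? value : combiner.apply(existing, value));
        }
        return merged;
    }
}
```
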
*/ -public class MergeResult { - - private final boolean simulate; - private final boolean updateAllTypes; - - private final List conflicts = new ArrayList<>(); - private final List newFieldMappers = new ArrayList<>(); - private final List newObjectMappers = new ArrayList<>(); - - public MergeResult(boolean simulate, boolean updateAllTypes) { - this.simulate = simulate; - this.updateAllTypes = updateAllTypes; - } - - public void addFieldMappers(Collection fieldMappers) { - assert simulate() == false; - newFieldMappers.addAll(fieldMappers); - } - - public void addObjectMappers(Collection objectMappers) { - assert simulate() == false; - newObjectMappers.addAll(objectMappers); - } - - public Collection getNewFieldMappers() { - return newFieldMappers; - } - - public Collection getNewObjectMappers() { - return newObjectMappers; - } - - public boolean simulate() { - return simulate; - } - - public boolean updateAllTypes() { - return updateAllTypes; - } - - public void addConflict(String mergeFailure) { - conflicts.add(mergeFailure); - } - - public boolean hasConflicts() { - return conflicts.isEmpty() == false; - } - - public String[] buildConflicts() { - return conflicts.toArray(Strings.EMPTY_ARRAY); - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java index fc6d1fa9e1a..622c7729dd4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/MetadataFieldMapper.java @@ -51,8 +51,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { } public abstract static class Builder extends FieldMapper.Builder { - public Builder(String name, MappedFieldType fieldType) { - super(name, fieldType); + public Builder(String name, MappedFieldType fieldType, MappedFieldType defaultFieldType) { + super(name, fieldType, defaultFieldType); } } @@ -70,4 +70,8 @@ public abstract class MetadataFieldMapper extends FieldMapper { */ public abstract void postParse(ParseContext context) throws IOException; + @Override + public MetadataFieldMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (MetadataFieldMapper) super.merge(mergeWith, updateAllTypes); + } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index edf75621c1e..0a88e29c8d6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -595,7 +595,7 @@ public abstract class ParseContext { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = mapper; } else { - MapperUtils.merge(dynamicMappingsUpdate, mapper); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(mapper, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java index ed8314c6f7d..aa35e699b2d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ParsedDocument.java @@ -128,7 +128,7 @@ public class ParsedDocument { if (dynamicMappingsUpdate == null) { dynamicMappingsUpdate = update; } else { - MapperUtils.merge(dynamicMappingsUpdate, update); + dynamicMappingsUpdate = dynamicMappingsUpdate.merge(update, false); } } diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 7468f4fb2f6..0ee311678ef 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -72,7 +72,7 @@ public class BinaryFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index cd76fdbb047..e381bc9c60b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -72,7 +72,7 @@ public class BooleanFieldMapper extends FieldMapper { public static class Builder extends FieldMapper.Builder { public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.builder = this; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 61b22a1ee26..44b4cbcd35e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -77,8 +77,7 @@ public class ByteFieldMapper extends NumberFieldMapper { setupFieldType(context); ByteFieldMapper fieldMapper = new ByteFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ByteFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 5b4df635a34..69177401db7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -356,7 +356,7 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp * @param name of the completion field to build */ public Builder(String name) { - super(name, new CompletionFieldType()); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } @@ -605,11 +605,9 @@ public class CompletionFieldMapper extends FieldMapper implements ArrayValueMapp } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); CompletionFieldMapper fieldMergeWith = (CompletionFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - this.maxInputLength = fieldMergeWith.maxInputLength; - } + this.maxInputLength = fieldMergeWith.maxInputLength; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java index 27b96b27a44..7a99e6b50c0 
100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java @@ -123,8 +123,7 @@ public class DateFieldMapper extends NumberFieldMapper { fieldType.setNullValue(nullValue); DateFieldMapper fieldMapper = new DateFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DateFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java index 0e512bf4281..861d33e560e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java @@ -80,8 +80,7 @@ public class DoubleFieldMapper extends NumberFieldMapper { setupFieldType(context); DoubleFieldMapper fieldMapper = new DoubleFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (DoubleFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java index 9a607ffd415..ad88c745dfd 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java @@ -81,8 +81,7 @@ public class FloatFieldMapper extends NumberFieldMapper { setupFieldType(context); FloatFieldMapper fieldMapper = new FloatFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (FloatFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java index 868cfeb4380..18995498113 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java @@ -85,8 +85,7 @@ public class IntegerFieldMapper extends NumberFieldMapper { IntegerFieldMapper fieldMapper = new IntegerFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IntegerFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java index 4130c902586..9d9557c41f4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java @@ -84,8 +84,7 @@ public class LongFieldMapper extends NumberFieldMapper { 
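
A mechanical consequence of the copy-on-write setters runs through all the numeric mappers above and below: `includeInAll` now returns a (possibly fresh) mapper, so every builder changes from call-and-discard to `return (XFieldMapper) fieldMapper.includeInAll(includeInAll);`. Ignoring the return value would silently drop the setting; a toy demonstration of the hazard:

```java
final class BuilderReturnValue {
    // Minimal copy-on-write mapper: includeInAll returns a new instance when it changes anything.
    static final class MiniMapper {
        final Boolean includeInAll;
        MiniMapper(Boolean includeInAll) { this.includeInAll = includeInAll; }
        MiniMapper includeInAll(Boolean value) {
            return value != null ? new MiniMapper(value) : this;
        }
    }

    static MiniMapper buildWrong(MiniMapper mapper, Boolean includeInAll) {
        mapper.includeInAll(includeInAll); // result ignored: the setting is silently lost
        return mapper;
    }

    static MiniMapper buildRight(MiniMapper mapper, Boolean includeInAll) {
        return mapper.includeInAll(includeInAll); // keep the (possibly new) copy
    }

    public static void main(String[] args) {
        System.out.println(buildWrong(new MiniMapper(null), true).includeInAll);  // null
        System.out.println(buildRight(new MiniMapper(null), true).includeInAll); // true
    }
}
```
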
setupFieldType(context); LongFieldMapper fieldMapper = new LongFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (LongFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java index 87a63de99ec..ed537aa7e5f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java @@ -66,7 +66,7 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM private Boolean coerce; public Builder(String name, MappedFieldType fieldType, int defaultPrecisionStep) { - super(name, fieldType); + super(name, fieldType, fieldType); this.fieldType.setNumericPrecisionStep(defaultPrecisionStep); } @@ -183,22 +183,41 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void includeInAll(Boolean includeInAll) { + protected NumberFieldMapper clone() { + return (NumberFieldMapper) super.clone(); + } + + @Override + public Mapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public Mapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + NumberFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public Mapper unsetIncludeInAll() { + if (includeInAll != null) { + NumberFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -254,21 +273,16 @@ public abstract class NumberFieldMapper extends FieldMapper implements AllFieldM } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - this.includeInAll = nfmMergeWith.includeInAll; - if (nfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = nfmMergeWith.ignoreMalformed; - } - if (nfmMergeWith.coerce.explicit()) { - this.coerce = nfmMergeWith.coerce; - } + this.includeInAll = nfmMergeWith.includeInAll; + if (nfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = nfmMergeWith.ignoreMalformed; + } + if (nfmMergeWith.coerce.explicit()) { + this.coerce = nfmMergeWith.coerce; } }
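The NumberFieldMapper hunk above turns the includeInAll family from in-place setters into copy-on-write operations: callers get back either this mapper or a modified clone, which is why every Builder.build() in these files now returns the result of fieldMapper.includeInAll(includeInAll) instead of the original instance. A minimal, self-contained sketch of that pattern (hypothetical class names, not code from this patch):

public class CopyOnWriteIncludeInAllExample implements Cloneable {
    private Boolean includeInAll;

    @Override
    protected CopyOnWriteIncludeInAllExample clone() {
        try {
            return (CopyOnWriteIncludeInAllExample) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // cannot happen, we implement Cloneable
        }
    }

    /** Returns a modified copy instead of mutating this instance. */
    public CopyOnWriteIncludeInAllExample includeInAll(Boolean includeInAll) {
        if (includeInAll == null) {
            return this; // nothing to change, the shared instance stays untouched
        }
        CopyOnWriteIncludeInAllExample clone = clone();
        clone.includeInAll = includeInAll;
        return clone;
    }

    public static void main(String[] args) {
        CopyOnWriteIncludeInAllExample original = new CopyOnWriteIncludeInAllExample();
        CopyOnWriteIncludeInAllExample updated = original.includeInAll(true);
        // prints "null true": the original was never mutated
        System.out.println(original.includeInAll + " " + updated.includeInAll);
    }
}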
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java index 81ed6cc3bac..e455959c530 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java @@ -81,8 +81,7 @@ public class ShortFieldMapper extends NumberFieldMapper { ShortFieldMapper fieldMapper = new ShortFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (ShortFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java index 0a921ad85eb..08582c65997 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java @@ -35,7 +35,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.internal.AllFieldMapper; @@ -99,7 +98,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc protected int ignoreAbove = Defaults.IGNORE_ABOVE; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); builder = this; } @@ -150,8 +149,7 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc StringFieldMapper fieldMapper = new StringFieldMapper( name, fieldType, defaultFieldType, positionIncrementGap, ignoreAbove, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return fieldMapper.includeInAll(includeInAll); } } @@ -257,22 +255,41 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void includeInAll(Boolean includeInAll) { + protected StringFieldMapper clone() { + return (StringFieldMapper) super.clone(); + } + + @Override + public StringFieldMapper includeInAll(Boolean includeInAll) { if (includeInAll != null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { + public StringFieldMapper includeInAllIfNotSet(Boolean includeInAll) { if (includeInAll != null && this.includeInAll == null) { - this.includeInAll = includeInAll; + StringFieldMapper clone = clone(); + clone.includeInAll = includeInAll; + return clone; + } else { + return this; } } @Override - public void unsetIncludeInAll() { - includeInAll = null; + public StringFieldMapper unsetIncludeInAll() { + if (includeInAll != null) { + StringFieldMapper clone = clone(); + clone.includeInAll = null; + return clone; + } else { + return this; + } } @Override @@ -359,15 +376,10 @@ public class StringFieldMapper extends FieldMapper implements AllFieldMapper.Inc } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; - this.ignoreAbove = 
((StringFieldMapper) mergeWith).ignoreAbove; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll; + this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java index 8348892e44a..a485c3727fc 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost; @@ -81,8 +80,7 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { TokenCountFieldMapper fieldMapper = new TokenCountFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), analyzer, multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (TokenCountFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override @@ -190,14 +188,9 @@ public class TokenCountFieldMapper extends IntegerFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - if (!mergeResult.simulate()) { - this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); + this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer; } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java index e530243657c..f6bd4946eb2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java @@ -61,7 +61,6 @@ public class TypeParsers { @Override public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException { - ContentPath.Type pathType = null; FieldMapper.Builder mainFieldBuilder = null; List<Mapper.Builder> fields = null; String firstType = null; @@ -70,10 +69,7 @@ public class TypeParsers { Map.Entry<String, Object> entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - pathType = parsePathType(name, fieldNode.toString()); - iterator.remove(); - } else if (fieldName.equals("fields")) { + if (fieldName.equals("fields")) { Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode; for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) { Map.Entry<String, Object> entry1 = fieldsIterator.next(); @@ -132,17 +128,10 @@ public class TypeParsers { } } - if (fields != null && pathType != null) { + if (fields != null) { for (Mapper.Builder field : fields) { mainFieldBuilder.addMultiField(field); } - mainFieldBuilder.multiFieldPathType(pathType); - } else if (fields != null) { - for (Mapper.Builder field : fields) { - mainFieldBuilder.addMultiField(field); - } - } else if (pathType != null) { - mainFieldBuilder.multiFieldPathType(pathType); - } return mainFieldBuilder; } @@ -337,10 +326,7 @@ public static boolean parseMultiField(FieldMapper.Builder builder, String name, Mapper.TypeParser.ParserContext parserContext, String propName, Object propNode) { parserContext = parserContext.createMultiFieldContext(parserContext); - if (propName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.multiFieldPathType(parsePathType(name, propNode.toString())); - return true; - } else if (propName.equals("fields")) { + if (propName.equals("fields")) { final Map<String, Object> multiFieldsPropNodes; @@ -457,17 +443,6 @@ public class TypeParsers { } } - public static ContentPath.Type parsePathType(String name, String path) throws MapperParsingException { - path = Strings.toUnderscoreCase(path); - if ("just_name".equals(path)) { - return ContentPath.Type.JUST_NAME; - } else if ("full".equals(path)) { - return ContentPath.Type.FULL; - } else { - throw new MapperParsingException("wrong value for pathType [" + path + "] for object [" + name + "]"); - } - } - @SuppressWarnings("unchecked") public static void parseCopyFields(Object propNode, FieldMapper.Builder builder) { FieldMapper.CopyTo.Builder copyToBuilder = new FieldMapper.CopyTo.Builder(); 
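TypeParsers consumes mapping options by walking the node map with an iterator and removing every entry it recognizes; the removed "path" branches above simply drop out of that walk, so a lingering "path" option now surfaces as an unknown setting. A small runnable sketch of that consume-and-remove idiom (hypothetical option names, not the parser itself):

import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class ConsumeOptionsExample {
    public static void main(String[] args) {
        Map<String, Object> node = new HashMap<>();
        node.put("fields", new HashMap<String, Object>());
        node.put("bogus_option", true);

        // consume the options we understand, removing each one as we go
        for (Iterator<Map.Entry<String, Object>> iterator = node.entrySet().iterator(); iterator.hasNext();) {
            Map.Entry<String, Object> entry = iterator.next();
            if (entry.getKey().equals("fields")) {
                // a real parser would build sub-field mappers here
                iterator.remove();
            }
        }

        // anything left over was not recognized and gets rejected
        if (node.isEmpty() == false) {
            System.out.println("unknown mapping options: " + node.keySet());
        }
    }
}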
diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java index 0b57d866ddd..0bbe2fe8f1b 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/BaseGeoPointFieldMapper.java @@ -33,12 +33,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; @@ -74,7 +72,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public static class Defaults { - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; public static final boolean ENABLE_LATLON = false; public static final boolean ENABLE_GEOHASH = false; public static final boolean ENABLE_GEOHASH_PREFIX = false; @@ -83,7 +80,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract static class Builder<T extends Builder, Y extends BaseGeoPointFieldMapper> extends FieldMapper.Builder<T, Y> { - protected ContentPath.Type pathType = Defaults.PATH_TYPE; protected boolean enableLatLon = Defaults.ENABLE_LATLON; @@ -98,7 +94,7 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected Boolean ignoreMalformed; public Builder(String name, GeoPointFieldType fieldType) { - super(name, 
fieldType); + super(name, fieldType, fieldType); } @Override @@ -106,12 +102,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr return (GeoPointFieldType)fieldType; } - @Override - public T multiFieldPathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - @Override public T fieldDataSettings(Settings settings) { this.fieldDataSettings = settings; @@ -159,13 +149,10 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } public abstract Y build(BuilderContext context, String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, - Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo); public Y build(Mapper.BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - GeoPointFieldType geoPointFieldType = (GeoPointFieldType)fieldType; DoubleFieldMapper latMapper = null; @@ -191,9 +178,8 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr geoPointFieldType.setGeoHashEnabled(geoHashMapper.fieldType(), geoHashPrecision, enableGeoHashPrefix); } context.path().remove(); - context.path().pathType(origPathType); - return build(context, name, fieldType, defaultFieldType, context.indexSettings(), origPathType, + return build(context, name, fieldType, defaultFieldType, context.indexSettings(), latMapper, lonMapper, geoHashMapper, multiFieldsBuilder.build(this, context), ignoreMalformed(context), copyTo); } } @@ -365,17 +351,14 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr protected final DoubleFieldMapper lonMapper; - protected final ContentPath.Type pathType; - protected final StringFieldMapper geoHashMapper; protected Explicit ignoreMalformed; protected BaseGeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { super(simpleName, fieldType, defaultFieldType, indexSettings, multiFields, copyTo); - this.pathType = pathType; this.latMapper = latMapper; this.lonMapper = lonMapper; this.geoHashMapper = geoHashMapper; @@ -388,17 +371,11 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } - + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); BaseGeoPointFieldMapper gpfmMergeWith = (BaseGeoPointFieldMapper) mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.ignoreMalformed.explicit()) { - this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; - } + if (gpfmMergeWith.ignoreMalformed.explicit()) { + this.ignoreMalformed = gpfmMergeWith.ignoreMalformed; } } @@ -441,8 +418,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr 
@Override public Mapper parse(ParseContext context) throws IOException { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(simpleName()); GeoPoint sparse = context.parseExternalValue(GeoPoint.class); @@ -487,7 +462,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr } context.path().remove(); - context.path().pathType(origPathType); return null; } @@ -512,9 +486,6 @@ public abstract class BaseGeoPointFieldMapper extends FieldMapper implements Arr @Override protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { super.doXContentBody(builder, includeDefaults, params); - if (includeDefaults || pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeDefaults || fieldType().isLatLonEnabled() != GeoPointFieldMapper.Defaults.ENABLE_LATLON) { builder.field("lat_lon", fieldType().isLatLonEnabled()); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java index 286aca29727..fa61669e800 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java @@ -27,7 +27,6 @@ import org.elasticsearch.common.Explicit; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; @@ -81,12 +80,12 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { @Override public GeoPointFieldMapper build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); - return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapper(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } @@ -104,9 +103,9 @@ public class GeoPointFieldMapper extends BaseGeoPointFieldMapper { } public GeoPointFieldMapper(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java 
b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java index 84e6bde07ac..735baa88533 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperLegacy.java @@ -35,11 +35,9 @@ import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.util.ByteUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.DoubleFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper.CustomNumericDocValuesField; @@ -111,14 +109,14 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement @Override public GeoPointFieldMapperLegacy build(BuilderContext context, String simpleName, MappedFieldType fieldType, - MappedFieldType defaultFieldType, Settings indexSettings, ContentPath.Type pathType, DoubleFieldMapper latMapper, + MappedFieldType defaultFieldType, Settings indexSettings, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, CopyTo copyTo) { fieldType.setTokenized(false); setupFieldType(context); fieldType.setHasDocValues(false); defaultFieldType.setHasDocValues(false); - return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, + return new GeoPointFieldMapperLegacy(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, coerce(context), copyTo); } @@ -288,32 +286,27 @@ public class GeoPointFieldMapperLegacy extends BaseGeoPointFieldMapper implement protected Explicit coerce; public GeoPointFieldMapperLegacy(String simpleName, MappedFieldType fieldType, MappedFieldType defaultFieldType, Settings indexSettings, - ContentPath.Type pathType, DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, + DoubleFieldMapper latMapper, DoubleFieldMapper lonMapper, StringFieldMapper geoHashMapper, MultiFields multiFields, Explicit ignoreMalformed, Explicit coerce, CopyTo copyTo) { - super(simpleName, fieldType, defaultFieldType, indexSettings, pathType, latMapper, lonMapper, geoHashMapper, multiFields, + super(simpleName, fieldType, defaultFieldType, indexSettings, latMapper, lonMapper, geoHashMapper, multiFields, ignoreMalformed, copyTo); this.coerce = coerce; } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoPointFieldMapperLegacy gpfmMergeWith = (GeoPointFieldMapperLegacy) mergeWith; if (gpfmMergeWith.coerce.explicit()) { if (coerce.explicit() && coerce.value() != gpfmMergeWith.coerce.value()) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] has different [coerce]"); } } 
- if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gpfmMergeWith.coerce.explicit()) { - this.coerce = gpfmMergeWith.coerce; - } + if (gpfmMergeWith.coerce.explicit()) { + this.coerce = gpfmMergeWith.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java index 7e784324f36..1ba49e64d80 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java @@ -45,7 +45,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import java.io.IOException; @@ -121,7 +120,7 @@ public class GeoShapeFieldMapper extends FieldMapper { private Boolean coerce; public Builder(String name) { - super(name, Defaults.FIELD_TYPE); + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override @@ -185,7 +184,7 @@ public class GeoShapeFieldMapper extends FieldMapper { builder.fieldType().setDistanceErrorPct(Double.parseDouble(fieldNode.toString())); iterator.remove(); } else if (Names.ORIENTATION.equals(fieldName)) { - builder.fieldType().setOrientation(ShapeBuilder.orientationFromString(fieldNode.toString())); + builder.fieldType().setOrientation(ShapeBuilder.Orientation.fromString(fieldNode.toString())); iterator.remove(); } else if (Names.STRATEGY.equals(fieldName)) { builder.fieldType().setStrategyName(fieldNode.toString()); @@ -193,7 +192,8 @@ } else if (Names.COERCE.equals(fieldName)) { builder.coerce(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName)) { + } else if (Names.STRATEGY_POINTS_ONLY.equals(fieldName) + && builder.fieldType().strategyName.equals(SpatialStrategy.TERM.getStrategyName()) == false) { builder.fieldType().setPointsOnly(XContentMapValues.nodeBooleanValue(fieldNode)); iterator.remove(); } @@ -284,6 +284,7 @@ public class GeoShapeFieldMapper extends FieldMapper { termStrategy = new TermQueryPrefixTreeStrategy(prefixTree, names().indexName()); termStrategy.setDistErrPct(distanceErrorPct()); defaultStrategy = resolveStrategy(strategyName); + defaultStrategy.setPointsOnly(pointsOnly); } @Override @@ -347,6 +348,9 @@ public class GeoShapeFieldMapper extends FieldMapper { public void setStrategyName(String strategyName) { checkIfFrozen(); this.strategyName = strategyName; + if (this.strategyName.equals(SpatialStrategy.TERM.getStrategyName())) { + this.pointsOnly = true; + } } public boolean pointsOnly() { @@ -406,7 +410,6 @@ public class GeoShapeFieldMapper extends FieldMapper { public PrefixTreeStrategy resolveStrategy(String strategyName) { if (SpatialStrategy.RECURSIVE.getStrategyName().equals(strategyName)) { - recursiveStrategy.setPointsOnly(pointsOnly()); return recursiveStrategy; } if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) { @@ -446,7 +449,7 @@ public class GeoShapeFieldMapper extends FieldMapper { } shape = shapeBuilder.build(); } - if (fieldType().defaultStrategy() instanceof RecursivePrefixTreeStrategy && fieldType().pointsOnly() && !(shape instanceof Point)) { + if (fieldType().pointsOnly() && !(shape instanceof Point)) { throw new MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); } @@ -471,17 +474,12 @@ } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gsfm.coerce.explicit()) { - this.coerce = gsfm.coerce; - } + if (gsfm.coerce.explicit()) { + this.coerce = gsfm.coerce; } } 
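Net effect of the GeoShapeFieldMapper changes above: choosing the term strategy now implies points-only indexing at the field-type level, the parser no longer lets points_only be set explicitly for that strategy, and the points-only check in parse() applies regardless of which prefix-tree strategy is active. A toy model of that invariant (hypothetical class, not the mapper itself):

public class StrategyExample {
    private String strategyName = "recursive";
    private boolean pointsOnly = false;

    public void setStrategyName(String strategyName) {
        this.strategyName = strategyName;
        if ("term".equals(strategyName)) {
            pointsOnly = true; // term queries can only match whole points
        }
    }

    public static void main(String[] args) {
        StrategyExample fieldType = new StrategyExample();
        fieldType.setStrategyName("term");
        System.out.println(fieldType.pointsOnly); // true, implied by the strategy
    }
}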
MapperParsingException("[{" + fieldType().names().fullName() + "}] is configured for points only but a " + ((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found"); } @@ -471,17 +474,12 @@ public class GeoShapeFieldMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); - if (!this.getClass().equals(mergeWith.getClass())) { - return; - } + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); GeoShapeFieldMapper gsfm = (GeoShapeFieldMapper)mergeWith; - if (mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { - if (gsfm.coerce.explicit()) { - this.coerce = gsfm.coerce; - } + if (gsfm.coerce.explicit()) { + this.coerce = gsfm.coerce; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java index 645c36a4855..bcd094d2ae6 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java @@ -36,7 +36,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -58,11 +57,24 @@ public class AllFieldMapper extends MetadataFieldMapper { public interface IncludeInAll { - void includeInAll(Boolean includeInAll); + /** + * If {@code includeInAll} is not null then return a copy of this mapper + * that will include values in the _all field according to {@code includeInAll}. + */ + Mapper includeInAll(Boolean includeInAll); - void includeInAllIfNotSet(Boolean includeInAll); + /** + * If {@code includeInAll} is not null and not set on this mapper yet, then + * return a copy of this mapper that will include values in the _all field + * according to {@code includeInAll}. + */ + Mapper includeInAllIfNotSet(Boolean includeInAll); - void unsetIncludeInAll(); + /** + * If {@code includeInAll} was already set on this mapper then return a copy + * of this mapper that has {@code includeInAll} not set. + */ + Mapper unsetIncludeInAll(); } public static final String NAME = "_all"; @@ -89,7 +101,7 @@ public class AllFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); builder = this; indexName = Defaults.INDEX_NAME; } @@ -309,11 +321,11 @@ public class AllFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) { - mergeResult.addConflict("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); + throw new IllegalArgumentException("mapper [" + fieldType().names().fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled()); } - super.merge(mergeWith, mergeResult); + super.doMerge(mergeWith, updateAllTypes); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java index 7883415e59a..e03439f3f54 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java @@ -78,7 +78,7 @@ public class FieldNamesFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java index 16b6c4c56da..0fe3e10bcb8 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IdFieldMapper.java @@ -44,7 +44,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -90,7 +89,7 @@ public class IdFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -331,7 +330,7 @@ public class IdFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index 962332b5c4b..dbbf03b72e2 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryShardContext; @@ -80,7 +79,7 @@ public class IndexFieldMapper extends MetadataFieldMapper { private EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -279,12 +278,10 @@ public class IndexFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith; - if (!mergeResult.simulate()) { - if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = indexFieldMapperMergeWith.enabledState; - } + if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = indexFieldMapperMergeWith.enabledState; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 760259a1802..65daef2a834 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -98,7 +97,7 @@ public class ParentFieldMapper extends MetadataFieldMapper { private final MappedFieldType childJoinFieldType = Defaults.JOIN_FIELD_TYPE.clone(); public Builder(String documentType) { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); this.indexName = name; this.documentType = documentType; builder = this; @@ -371,11 +370,11 
@@ public class ParentFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { - super.merge(mergeWith, mergeResult); + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (Objects.equals(parentType, fieldMergeWith.parentType) == false) { - mergeResult.addConflict("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); + throw new IllegalArgumentException("The _parent field's type option can't be changed: [" + parentType + "]->[" + fieldMergeWith.parentType + "]"); } List conflicts = new ArrayList<>(); @@ -383,13 +382,13 @@ public class ParentFieldMapper extends MetadataFieldMapper { parentJoinFieldType.checkCompatibility(fieldMergeWith.parentJoinFieldType, conflicts, true); // same here if (childJoinFieldType != null) { // TODO: this can be set to false when the old parent/child impl is removed, we can do eager global ordinals loading per type. - childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, mergeResult.updateAllTypes() == false); + childJoinFieldType.checkCompatibility(fieldMergeWith.childJoinFieldType, conflicts, updateAllTypes == false); } - for (String conflict : conflicts) { - mergeResult.addConflict(conflict); + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Merge conflicts: " + conflicts); } - if (active() && mergeResult.simulate() == false && mergeResult.hasConflicts() == false) { + if (active()) { childJoinFieldType = fieldMergeWith.childJoinFieldType.clone(); } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 18d0645d2d5..40b7e6871c4 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; @@ -78,7 +77,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { private String path = Defaults.PATH; public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } public Builder required(boolean required) { @@ -249,7 +248,7 @@ public class RoutingFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index da3b8dbc5ab..40bf9eb0c8e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -29,13 +29,10 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; @@ -44,21 +41,17 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; -import java.io.BufferedInputStream; import java.io.IOException; -import java.io.InputStream; +import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.Objects; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * @@ -72,8 +65,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { public static class Defaults { public static final String NAME = SourceFieldMapper.NAME; public static final boolean ENABLED = true; - public static final long COMPRESS_THRESHOLD = -1; - public static final String FORMAT = null; // default format is to use the one provided public static final MappedFieldType FIELD_TYPE = new SourceFieldType(); @@ -93,17 +84,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { private boolean enabled = Defaults.ENABLED; - private long compressThreshold = Defaults.COMPRESS_THRESHOLD; - - private Boolean compress = null; - - private String format = Defaults.FORMAT; - private String[] includes = null; private String[] excludes = null; public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(boolean enabled) { @@ -111,21 +96,6 @@ public class SourceFieldMapper extends MetadataFieldMapper { return this; } - public Builder compress(boolean compress) { - this.compress = compress; - return this; - } - - public Builder 
compressThreshold(long compressThreshold) { - this.compressThreshold = compressThreshold; - return this; - } - - public Builder format(String format) { - this.format = format; - return this; - } - public Builder includes(String[] includes) { this.includes = includes; return this; } @@ -138,7 +108,7 @@ @Override public SourceFieldMapper build(BuilderContext context) { - return new SourceFieldMapper(enabled, format, compress, compressThreshold, includes, excludes, context.indexSettings()); + return new SourceFieldMapper(enabled, includes, excludes, context.indexSettings()); } } @@ -154,24 +124,8 @@ if (fieldName.equals("enabled")) { builder.enabled(nodeBooleanValue(fieldNode)); iterator.remove(); - } else if (fieldName.equals("compress") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - builder.compress(nodeBooleanValue(fieldNode)); - } - iterator.remove(); - } else if (fieldName.equals("compress_threshold") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - if (fieldNode != null) { - if (fieldNode instanceof Number) { - builder.compressThreshold(((Number) fieldNode).longValue()); - builder.compress(true); - } else { - builder.compressThreshold(ByteSizeValue.parseBytesSizeValue(fieldNode.toString(), "compress_threshold").bytes()); - builder.compress(true); - } - } - iterator.remove(); - } else if ("format".equals(fieldName)) { - builder.format(nodeStringValue(fieldNode, null)); + } else if ("format".equals(fieldName) && parserContext.indexVersionCreated().before(Version.V_3_0_0)) { + // ignore on old indices, reject on and after 3.0 iterator.remove(); } else if (fieldName.equals("includes")) { List values = (List) fieldNode; 
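With compression and format conversion gone, the only transformation left in SourceFieldMapper is include/exclude filtering of the source map before it is stored. A toy, top-level-keys-only version of that filtering for illustration (the real XContentMapValues.filter also understands nested paths and wildcard patterns):

import java.util.HashMap;
import java.util.Map;

public class SourceFilterExample {
    // hypothetical simplification of include/exclude source filtering
    static Map<String, Object> filter(Map<String, Object> source, String[] includes, String[] excludes) {
        Map<String, Object> filtered = new HashMap<>();
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            boolean included = includes == null || contains(includes, entry.getKey());
            boolean excluded = excludes != null && contains(excludes, entry.getKey());
            if (included && !excluded) {
                filtered.put(entry.getKey(), entry.getValue());
            }
        }
        return filtered;
    }

    static boolean contains(String[] values, String key) {
        for (String value : values) {
            if (value.equals(key)) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("title", "doc");
        source.put("secret", "hide me");
        System.out.println(filter(source, null, new String[] {"secret"})); // {title=doc}
    }
}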
@@ -242,30 +196,18 @@ /** indicates whether the source will always exist and be complete, for use by features like the update API */ private final boolean complete; - private Boolean compress; - private long compressThreshold; - private final String[] includes; private final String[] excludes; - private String format; - - private XContentType formatContentType; - private SourceFieldMapper(Settings indexSettings) { - this(Defaults.ENABLED, Defaults.FORMAT, null, -1, null, null, indexSettings); + this(Defaults.ENABLED, null, null, indexSettings); } - private SourceFieldMapper(boolean enabled, String format, Boolean compress, long compressThreshold, - String[] includes, String[] excludes, Settings indexSettings) { + private SourceFieldMapper(boolean enabled, String[] includes, String[] excludes, Settings indexSettings) { super(NAME, Defaults.FIELD_TYPE.clone(), Defaults.FIELD_TYPE, indexSettings); // Only stored. this.enabled = enabled; - this.compress = compress; - this.compressThreshold = compressThreshold; this.includes = includes; this.excludes = excludes; - this.format = format; - this.formatContentType = format == null ? null : XContentType.fromRestContentType(format); this.complete = enabled && includes == null && excludes == null; } @@ -321,71 +263,11 @@ public class SourceFieldMapper extends MetadataFieldMapper { Tuple<XContentType, Map<String, Object>> mapTuple = XContentHelper.convertToMap(source, true); Map<String, Object> filteredSource = XContentMapValues.filter(mapTuple.v2(), includes, excludes); BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = bStream; - if (compress != null && compress && (compressThreshold == -1 || source.length() > compressThreshold)) { - streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - } - XContentType contentType = formatContentType; - if (contentType == null) { - contentType = mapTuple.v1(); - } - XContentBuilder builder = XContentFactory.contentBuilder(contentType, streamOutput).map(filteredSource); + XContentType contentType = mapTuple.v1(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType, bStream).map(filteredSource); builder.close(); source = bStream.bytes(); - } else if (compress != null && compress && !CompressorFactory.isCompressed(source)) { - if (compressThreshold == -1 || source.length() > compressThreshold) { - BytesStreamOutput bStream = new BytesStreamOutput(); - XContentType contentType = XContentFactory.xContentType(source); - if (formatContentType != null && formatContentType != contentType) { - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, CompressorFactory.defaultCompressor().streamOutput(bStream)); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - } else { - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - source.writeTo(streamOutput); - streamOutput.close(); - } - source = bStream.bytes(); - // update the data in the context, so it can be compressed and stored compressed outside... - context.source(source); - } - } else if (formatContentType != null) { - // see if we need to convert the content type - Compressor compressor = CompressorFactory.compressor(source); - if (compressor != null) { - InputStream compressedStreamInput = compressor.streamInput(source.streamInput()); - if (compressedStreamInput.markSupported() == false) { - compressedStreamInput = new BufferedInputStream(compressedStreamInput); - } - XContentType contentType = XContentFactory.xContentType(compressedStreamInput); - if (contentType != formatContentType) { - // we need to reread and store back, compressed.... - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput streamOutput = CompressorFactory.defaultCompressor().streamOutput(bStream); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, streamOutput); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(compressedStreamInput)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } else { - compressedStreamInput.close(); - } - } else { - XContentType contentType = XContentFactory.xContentType(source); - if (contentType != formatContentType) { - // we need to reread and store back - // we need to reread and store back, compressed.... 
- BytesStreamOutput bStream = new BytesStreamOutput(); - XContentBuilder builder = XContentFactory.contentBuilder(formatContentType, bStream); - builder.copyCurrentStructure(XContentFactory.xContent(contentType).createParser(source)); - builder.close(); - source = bStream.bytes(); - // update the data in the context, so we store it in the translog in this format - context.source(source); - } - } } if (!source.hasArray()) { source = source.toBytesArray(); @@ -403,26 +285,13 @@ public class SourceFieldMapper extends MetadataFieldMapper { boolean includeDefaults = params.paramAsBoolean("include_defaults", false); // all are defaults, no need to write it at all - if (!includeDefaults && enabled == Defaults.ENABLED && compress == null && compressThreshold == -1 && includes == null && excludes == null) { + if (!includeDefaults && enabled == Defaults.ENABLED && includes == null && excludes == null) { return builder; } builder.startObject(contentType()); if (includeDefaults || enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (includeDefaults || !Objects.equals(format, Defaults.FORMAT)) { - builder.field("format", format); - } - if (compress != null) { - builder.field("compress", compress); - } else if (includeDefaults) { - builder.field("compress", false); - } - if (compressThreshold != -1) { - builder.field("compress_threshold", new ByteSizeValue(compressThreshold).toString()); - } else if (includeDefaults) { - builder.field("compress_threshold", -1); - } if (includes != null) { builder.field("includes", includes); @@ -441,25 +310,20 @@ public class SourceFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; - if (mergeResult.simulate()) { - if (this.enabled != sourceMergeWith.enabled) { - mergeResult.addConflict("Cannot update enabled setting for [_source]"); - } - if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { - mergeResult.addConflict("Cannot update includes setting for [_source]"); - } - if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { - mergeResult.addConflict("Cannot update excludes setting for [_source]"); - } - } else { - if (sourceMergeWith.compress != null) { - this.compress = sourceMergeWith.compress; - } - if (sourceMergeWith.compressThreshold != -1) { - this.compressThreshold = sourceMergeWith.compressThreshold; - } + List conflicts = new ArrayList<>(); + if (this.enabled != sourceMergeWith.enabled) { + conflicts.add("Cannot update enabled setting for [_source]"); + } + if (Arrays.equals(includes(), sourceMergeWith.includes()) == false) { + conflicts.add("Cannot update includes setting for [_source]"); + } + if (Arrays.equals(excludes(), sourceMergeWith.excludes()) == false) { + conflicts.add("Cannot update excludes setting for [_source]"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Can't merge because of conflicts: " + conflicts); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 9a18befe622..f99ca18600a 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -32,7 +32,6 @@ import 
org.elasticsearch.index.analysis.NumericLongAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.SourceToParse; @@ -79,7 +78,7 @@ public class TTLFieldMapper extends MetadataFieldMapper { private long defaultTTL = Defaults.DEFAULT; public Builder() { - super(Defaults.NAME, Defaults.TTL_FIELD_TYPE); + super(Defaults.NAME, Defaults.TTL_FIELD_TYPE, Defaults.FIELD_TYPE); } public Builder enabled(EnabledAttributeMapper enabled) { @@ -258,21 +257,19 @@ public class TTLFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith; - if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with - if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) { - mergeResult.addConflict("_ttl cannot be disabled once it was enabled."); + if (ttlMergeWith.enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with + if (this.enabledState == EnabledAttributeMapper.ENABLED && ttlMergeWith.enabledState == EnabledAttributeMapper.DISABLED) { + throw new IllegalArgumentException("_ttl cannot be disabled once it was enabled."); } else { - if (!mergeResult.simulate()) { - this.enabledState = ttlMergeWith.enabledState; - } + this.enabledState = ttlMergeWith.enabledState; } } if (ttlMergeWith.defaultTTL != -1) { // we never build the default when the field is disabled so we should also not set it // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster) - if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { + if (enabledState == EnabledAttributeMapper.ENABLED) { this.defaultTTL = ttlMergeWith.defaultTTL; } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 468243d63cf..b0606f1994f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -33,13 +33,13 @@ import org.elasticsearch.index.analysis.NumericDateAnalyzer; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.LongFieldMapper; import java.io.IOException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -96,7 +96,7 @@ public class TimestampFieldMapper extends MetadataFieldMapper { private Boolean ignoreMissing = null; public 
Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); if (existing != null) { // if there is an existing type, always use that store value (only matters for < 2.0) explicitStore = true; @@ -379,31 +379,32 @@ public class TimestampFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith; - super.merge(mergeWith, mergeResult); - if (!mergeResult.simulate()) { - if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { - this.enabledState = timestampFieldMapperMergeWith.enabledState; - } - } else { - if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { - return; - } - if (defaultTimestamp == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); - } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { - mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); - } - if (this.path != null) { - if (path.equals(timestampFieldMapperMergeWith.path()) == false) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); - } - } else if (timestampFieldMapperMergeWith.path() != null) { - mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + super.doMerge(mergeWith, updateAllTypes); + if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { + this.enabledState = timestampFieldMapperMergeWith.enabledState; + } + if (timestampFieldMapperMergeWith.defaultTimestamp() == null && defaultTimestamp == null) { + return; + } + List<String> conflicts = new ArrayList<>(); + if (defaultTimestamp == null) { + conflicts.add("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { + conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering null"); + } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { + conflicts.add("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + } + if (this.path != null) { + if (path.equals(timestampFieldMapperMergeWith.path()) == false) { + conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ?
"missing" : timestampFieldMapperMergeWith.path())); } + } else if (timestampFieldMapperMergeWith.path() != null) { + conflicts.add("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is missing"); + } + if (conflicts.isEmpty() == false) { + throw new IllegalArgumentException("Conflicts: " + conflicts); } } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index d4acc3c5975..c529db5183e 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.Uid; @@ -81,7 +80,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -225,7 +224,7 @@ public class TypeFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index ef4c48e62e3..10f9880d97d 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -79,7 +78,7 @@ public class UidFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder(MappedFieldType existing) { - super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing); + super(Defaults.NAME, existing == null ? 
Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); indexName = Defaults.NAME; } @@ -225,7 +224,7 @@ public class UidFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // do nothing here, no merging, but also no exception } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 292a622ab73..6b1471afda7 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -62,7 +61,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { public Builder() { - super(Defaults.NAME, Defaults.FIELD_TYPE); + super(Defaults.NAME, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); } @Override @@ -166,7 +165,7 @@ public class VersionFieldMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // nothing to do } } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index e57ceaf8ca8..d8a7c752e6f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -122,8 +122,7 @@ public class IpFieldMapper extends NumberFieldMapper { setupFieldType(context); IpFieldMapper fieldMapper = new IpFieldMapper(name, fieldType, defaultFieldType, ignoreMalformed(context), coerce(context), context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo); - fieldMapper.includeInAll(includeInAll); - return fieldMapper; + return (IpFieldMapper) fieldMapper.includeInAll(includeInAll); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index c51264f3dba..58602f06dfa 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -125,13 +125,13 @@ public class DynamicTemplate { } public boolean match(ContentPath path, String name, String dynamicType) { - if (pathMatch != null && !patternMatch(pathMatch, path.fullPathAsText(name))) { + if (pathMatch != null && !patternMatch(pathMatch, path.pathAsText(name))) { return false; } if (match != null && !patternMatch(match, name)) { return false; } - if (pathUnmatch != null && patternMatch(pathUnmatch, path.fullPathAsText(name))) { + if (pathUnmatch != null && patternMatch(pathUnmatch, path.pathAsText(name))) { return false; } if (unmatch != null && patternMatch(unmatch, name)) { diff --git 
a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index 88f89719050..c2d9783fc9f 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; @@ -40,7 +39,6 @@ import java.util.*; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** * @@ -54,7 +52,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, public static final boolean ENABLED = true; public static final Nested NESTED = Nested.NO; public static final Dynamic DYNAMIC = null; // not set, inherited from root - public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL; } public static enum Dynamic { @@ -104,8 +101,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, protected Dynamic dynamic = Defaults.DYNAMIC; - protected ContentPath.Type pathType = Defaults.PATH_TYPE; - protected Boolean includeInAll; protected final List mappersBuilders = new ArrayList<>(); @@ -130,11 +125,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return builder; } - public T pathType(ContentPath.Type pathType) { - this.pathType = pathType; - return builder; - } - public T includeInAll(boolean includeInAll) { this.includeInAll = includeInAll; return builder; @@ -147,8 +137,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, @Override public Y build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); context.path().add(name); Map mappers = new HashMap<>(); @@ -156,17 +144,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Mapper mapper = builder.build(context); mappers.put(mapper.simpleName(), mapper); } - context.path().pathType(origPathType); context.path().remove(); - ObjectMapper objectMapper = createMapper(name, context.path().fullPathAsText(name), enabled, nested, dynamic, pathType, mappers, context.indexSettings()); - objectMapper.includeInAllIfNotSet(includeInAll); + ObjectMapper objectMapper = createMapper(name, context.path().pathAsText(name), enabled, nested, dynamic, mappers, context.indexSettings()); + objectMapper = objectMapper.includeInAllIfNotSet(includeInAll); return (Y) objectMapper; } - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers, @Nullable Settings settings) { - return new ObjectMapper(name, fullPath, enabled, nested, dynamic, pathType, mappers); + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers, @Nullable Settings settings) { + return new ObjectMapper(name, fullPath, enabled, nested, dynamic, mappers); } } @@ -179,7 +166,7 @@ public class 
ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, Map.Entry entry = iterator.next(); String fieldName = Strings.toUnderscoreCase(entry.getKey()); Object fieldNode = entry.getValue(); - if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder) || parseObjectProperties(name, fieldName, fieldNode, parserContext, builder)) { + if (parseObjectOrDocumentTypeProperties(fieldName, fieldNode, parserContext, builder)) { iterator.remove(); } } @@ -214,14 +201,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return false; } - protected static boolean parseObjectProperties(String name, String fieldName, Object fieldNode, ParserContext parserContext, ObjectMapper.Builder builder) { - if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) { - builder.pathType(parsePathType(name, fieldNode.toString())); - return true; - } - return false; - } - protected static void parseNested(String name, Map node, ObjectMapper.Builder builder) { boolean nested = false; boolean nestedIncludeInParent = false; @@ -326,19 +305,16 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, private volatile Dynamic dynamic; - private final ContentPath.Type pathType; - private Boolean includeInAll; private volatile CopyOnWriteHashMap mappers; - ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers) { + ObjectMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers) { super(name); this.fullPath = fullPath; this.enabled = enabled; this.nested = nested; this.dynamic = dynamic; - this.pathType = pathType; if (mappers == null) { this.mappers = new CopyOnWriteHashMap<>(); } else { @@ -380,50 +356,58 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.enabled; } - public ContentPath.Type pathType() { - return pathType; - } - public Mapper getMapper(String field) { return mappers.get(field); } @Override - public void includeInAll(Boolean includeInAll) { + public ObjectMapper includeInAll(Boolean includeInAll) { if (includeInAll == null) { - return; + return this; } - this.includeInAll = includeInAll; + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAll(includeInAll)); } } + return clone; } @Override - public void includeInAllIfNotSet(Boolean includeInAll) { - if (this.includeInAll == null) { - this.includeInAll = includeInAll; + public ObjectMapper includeInAllIfNotSet(Boolean includeInAll) { + if (includeInAll == null || this.includeInAll != null) { + return this; } + + ObjectMapper clone = clone(); + clone.includeInAll = includeInAll; // when called from outside, apply this on all the inner mappers - for (Mapper mapper : mappers.values()) { + for (Mapper mapper : clone.mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll)); } } + return clone; } @Override - public void 
unsetIncludeInAll() { - includeInAll = null; + public ObjectMapper unsetIncludeInAll() { + if (includeInAll == null) { + return this; + } + ObjectMapper clone = clone(); + clone.includeInAll = null; // when called from outside, apply this on all the inner mappers for (Mapper mapper : mappers.values()) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); + clone.putMapper(((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll()); } } + return clone; } public Nested nested() { @@ -434,14 +418,9 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, return this.nestedTypeFilter; } - /** - * Put a new mapper. - * NOTE: this method must be called under the current {@link DocumentMapper} - * lock if concurrent updates are expected. - */ - public void putMapper(Mapper mapper) { + protected void putMapper(Mapper mapper) { if (mapper instanceof AllFieldMapper.IncludeInAll) { - ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); + mapper = ((AllFieldMapper.IncludeInAll) mapper).includeInAllIfNotSet(includeInAll); } mappers = mappers.copyAndPut(mapper.simpleName(), mapper); } @@ -464,64 +443,43 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, } @Override - public void merge(final Mapper mergeWith, final MergeResult mergeResult) { + public ObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ObjectMapper)) { - mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); - return; + throw new IllegalArgumentException("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; - - if (nested().isNested()) { - if (!mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); - return; - } - } else { - if (mergeWithObject.nested().isNested()) { - mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); - return; - } - } - - if (!mergeResult.simulate()) { - if (mergeWithObject.dynamic != null) { - this.dynamic = mergeWithObject.dynamic; - } - } - - doMerge(mergeWithObject, mergeResult); - - List mappersToPut = new ArrayList<>(); - List newObjectMappers = new ArrayList<>(); - List newFieldMappers = new ArrayList<>(); - for (Mapper mapper : mergeWithObject) { - Mapper mergeWithMapper = mapper; - Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); - if (mergeIntoMapper == null) { - // no mapping, simply add it if not simulating - if (!mergeResult.simulate()) { - mappersToPut.add(mergeWithMapper); - MapperUtils.collect(mergeWithMapper, newObjectMappers, newFieldMappers); - } - } else if (mergeIntoMapper instanceof MetadataFieldMapper == false) { - // root mappers can only exist here for backcompat, and are merged in Mapping - mergeIntoMapper.merge(mergeWithMapper, mergeResult); - } - } - if (!newFieldMappers.isEmpty()) { - mergeResult.addFieldMappers(newFieldMappers); - } - if (!newObjectMappers.isEmpty()) { - mergeResult.addObjectMappers(newObjectMappers); - } - // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock) - for (Mapper mapper : mappersToPut) { - putMapper(mapper); - } + ObjectMapper merged = clone(); + 
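
The three include_in_all methods above switch from in-place mutation to copy-on-write: clone the mapper, mutate only the clone (and its sub-mappers), and return it, so the published instance never changes under concurrent readers. The idiom in isolation, assuming a shallow clone() as in the mapper classes:

---------------------------------------------------------------------------
// Copy-on-write setter idiom from includeInAll()/unsetIncludeInAll()
// above. Callers must keep using the returned instance.
class CowSetting implements Cloneable {
    private Boolean includeInAll;

    public CowSetting includeInAll(Boolean value) {
        if (value == null) {
            return this;             // no change requested: reuse this instance
        }
        CowSetting clone = clone();
        clone.includeInAll = value;  // only the private copy is mutated
        return clone;
    }

    @Override
    protected CowSetting clone() {
        try {
            return (CowSetting) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new AssertionError(e); // Cloneable is implemented; cannot happen
        }
    }
}
---------------------------------------------------------------------------
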
merged.doMerge(mergeWithObject, updateAllTypes); + return merged; } - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { + protected void doMerge(final ObjectMapper mergeWith, boolean updateAllTypes) { + if (nested().isNested()) { + if (!mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from nested to non-nested"); + } + } else { + if (mergeWith.nested().isNested()) { + throw new IllegalArgumentException("object mapping [" + name() + "] can't be changed from non-nested to nested"); + } + } + if (mergeWith.dynamic != null) { + this.dynamic = mergeWith.dynamic; + } + + for (Mapper mergeWithMapper : mergeWith) { + Mapper mergeIntoMapper = mappers.get(mergeWithMapper.simpleName()); + Mapper merged; + if (mergeIntoMapper == null) { + // no mapping, simply add it + merged = mergeWithMapper; + } else { + // root mappers can only exist here for backcompat, and are merged in Mapping + merged = mergeIntoMapper.merge(mergeWithMapper, updateAllTypes); + } + putMapper(merged); + } } @Override @@ -549,9 +507,6 @@ public class ObjectMapper extends Mapper implements AllFieldMapper.IncludeInAll, if (enabled != Defaults.ENABLED) { builder.field("enabled", enabled); } - if (pathType != Defaults.PATH_TYPE) { - builder.field("path", pathType.name().toLowerCase(Locale.ROOT)); - } if (includeInAll != null) { builder.field("include_in_all", includeInAll); } diff --git a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index a0c989abd7d..2fd4e914718 100644 --- a/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/core/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -95,7 +95,7 @@ public class RootObjectMapper extends ObjectMapper { @Override - protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, ContentPath.Type pathType, Map mappers, @Nullable Settings settings) { + protected ObjectMapper createMapper(String name, String fullPath, boolean enabled, Nested nested, Dynamic dynamic, Map mappers, @Nullable Settings settings) { assert !nested.isNested(); FormatDateTimeFormatter[] dates = null; if (dynamicDateTimeFormatters == null) { @@ -106,7 +106,7 @@ public class RootObjectMapper extends ObjectMapper { } else { dates = dynamicDateTimeFormatters.toArray(new FormatDateTimeFormatter[dynamicDateTimeFormatters.size()]); } - return new RootObjectMapper(name, enabled, dynamic, pathType, mappers, + return new RootObjectMapper(name, enabled, dynamic, mappers, dates, dynamicTemplates.toArray(new DynamicTemplate[dynamicTemplates.size()]), dateDetection, numericDetection); @@ -196,15 +196,23 @@ public class RootObjectMapper extends ObjectMapper { private volatile DynamicTemplate dynamicTemplates[]; - RootObjectMapper(String name, boolean enabled, Dynamic dynamic, ContentPath.Type pathType, Map mappers, + RootObjectMapper(String name, boolean enabled, Dynamic dynamic, Map mappers, FormatDateTimeFormatter[] dynamicDateTimeFormatters, DynamicTemplate dynamicTemplates[], boolean dateDetection, boolean numericDetection) { - super(name, name, enabled, Nested.NO, dynamic, pathType, mappers); + super(name, name, enabled, Nested.NO, dynamic, mappers); this.dynamicTemplates = dynamicTemplates; this.dynamicDateTimeFormatters = dynamicDateTimeFormatters; this.dateDetection = dateDetection; this.numericDetection = 
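
With simulation gone, doMerge() above reduces to one rule per incoming sub-mapper: adopt it if the name is new, otherwise merge recursively, then publish via putMapper(). The loop in isolation, assuming a Mapper type exposing simpleName() and merge(Mapper, boolean) as in this diff:

---------------------------------------------------------------------------
import java.util.Map;

// Per-field merge rule from ObjectMapper.doMerge() above; a plain Map
// stands in for ObjectMapper's CopyOnWriteHashMap.
class SubMapperMerge {
    static void mergeSubMappers(Map<String, Mapper> mappers,
                                Iterable<Mapper> mergeWith, boolean updateAllTypes) {
        for (Mapper incoming : mergeWith) {
            Mapper existing = mappers.get(incoming.simpleName());
            Mapper merged = existing == null
                    ? incoming                                  // new field: adopt as-is
                    : existing.merge(incoming, updateAllTypes); // known field: recurse
            mappers.put(merged.simpleName(), merged);
        }
    }
}
---------------------------------------------------------------------------
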
numericDetection; } + /** Return a copy of this mapper that has the given {@code mapper} as a + * sub mapper. */ + public RootObjectMapper copyAndPutMapper(Mapper mapper) { + RootObjectMapper clone = (RootObjectMapper) clone(); + clone.putMapper(mapper); + return clone; + } + @Override public ObjectMapper mappingUpdate(Mapper mapper) { RootObjectMapper update = (RootObjectMapper) super.mappingUpdate(mapper); @@ -253,25 +261,29 @@ public class RootObjectMapper extends ObjectMapper { } @Override - protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { + public RootObjectMapper merge(Mapper mergeWith, boolean updateAllTypes) { + return (RootObjectMapper) super.merge(mergeWith, updateAllTypes); + } + + @Override + protected void doMerge(ObjectMapper mergeWith, boolean updateAllTypes) { + super.doMerge(mergeWith, updateAllTypes); RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; - if (!mergeResult.simulate()) { - // merge them - List mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); - for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { - boolean replaced = false; - for (int i = 0; i < mergedTemplates.size(); i++) { - if (mergedTemplates.get(i).name().equals(template.name())) { - mergedTemplates.set(i, template); - replaced = true; - } - } - if (!replaced) { - mergedTemplates.add(template); + // merge them + List mergedTemplates = new ArrayList<>(Arrays.asList(this.dynamicTemplates)); + for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { + boolean replaced = false; + for (int i = 0; i < mergedTemplates.size(); i++) { + if (mergedTemplates.get(i).name().equals(template.name())) { + mergedTemplates.set(i, template); + replaced = true; } } - this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); + if (!replaced) { + mergedTemplates.add(template); + } } + this.dynamicTemplates = mergedTemplates.toArray(new DynamicTemplate[mergedTemplates.size()]); } @Override diff --git a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index 5aad36cd27a..454465727b7 100644 --- a/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -31,19 +31,16 @@ import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.geo.SpatialStrategy; +import org.elasticsearch.common.geo.builders.PointBuilder; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; import org.elasticsearch.search.internal.SearchContext; @@ -61,13 +58,11 @@ 
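
The dynamic-template merge above, now unconditional since simulation is gone, is replace-by-name with append for unknown names, preserving the order of existing templates. The same logic as a standalone helper, with DynamicTemplate#name() as the identity key, as in the diff:

---------------------------------------------------------------------------
import java.util.ArrayList;
import java.util.List;

// Replace-by-name merge from RootObjectMapper.doMerge() above.
class TemplateMerge {
    static List<DynamicTemplate> mergeTemplates(List<DynamicTemplate> existing,
                                                List<DynamicTemplate> incoming) {
        List<DynamicTemplate> merged = new ArrayList<>(existing);
        for (DynamicTemplate template : incoming) {
            boolean replaced = false;
            for (int i = 0; i < merged.size(); i++) {
                if (merged.get(i).name().equals(template.name())) {
                    merged.set(i, template); // same name: the newer definition wins
                    replaced = true;
                }
            }
            if (replaced == false) {
                merged.add(template);        // unknown name: append at the end
            }
        }
        return merged;
    }
}
---------------------------------------------------------------------------
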
public class GeoShapeQueryBuilder extends AbstractQueryBuilder { String fieldName = null; ShapeRelation shapeRelation = null; SpatialStrategy strategy = null; - BytesReference shape = null; + ShapeBuilder shape = null; String id = null; String type = null; @@ -79,8 +77,7 @@ public class GeoShapeQueryParser implements QueryParser { currentFieldName = parser.currentName(); token = parser.nextToken(); if (parseContext.parseFieldMatcher().match(currentFieldName, SHAPE_FIELD)) { - XContentBuilder builder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - shape = builder.bytes(); + shape = ShapeBuilder.parse(parser); } else if (parseContext.parseFieldMatcher().match(currentFieldName, STRATEGY_FIELD)) { String strategyName = parser.text(); strategy = SpatialStrategy.fromString(strategyName); diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java deleted file mode 100644 index 70d0bb9350f..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryBuilder.java +++ /dev/null @@ -1,234 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.TermRangeQuery; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; -import org.elasticsearch.index.mapper.object.ObjectMapper; - -import java.io.IOException; -import java.util.Collection; -import java.util.Objects; - -/** - * Constructs a filter that have only null values or no value in the original field. - */ -public class MissingQueryBuilder extends AbstractQueryBuilder { - - public static final String NAME = "missing"; - - public static final boolean DEFAULT_NULL_VALUE = false; - - public static final boolean DEFAULT_EXISTENCE_VALUE = true; - - private final String fieldPattern; - - private final boolean nullValue; - - private final boolean existence; - - static final MissingQueryBuilder PROTOTYPE = new MissingQueryBuilder("field", DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - - /** - * Constructs a filter that returns documents with only null values or no value in the original field. 
- * @param fieldPattern the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. - * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public MissingQueryBuilder(String fieldPattern, boolean nullValue, boolean existence) { - if (Strings.isEmpty(fieldPattern)) { - throw new IllegalArgumentException("missing query must be provided with a [field]"); - } - if (nullValue == false && existence == false) { - throw new IllegalArgumentException("missing query must have either 'existence', or 'null_value', or both set to true"); - } - this.fieldPattern = fieldPattern; - this.nullValue = nullValue; - this.existence = existence; - } - - public MissingQueryBuilder(String fieldPattern) { - this(fieldPattern, DEFAULT_NULL_VALUE, DEFAULT_EXISTENCE_VALUE); - } - - public String fieldPattern() { - return this.fieldPattern; - } - - /** - * Returns true if the missing filter will include documents where the field contains a null value, otherwise - * these documents will not be included. - */ - public boolean nullValue() { - return this.nullValue; - } - - /** - * Returns true if the missing filter will include documents where the field has no values, otherwise - * these documents will not be included. - */ - public boolean existence() { - return this.existence; - } - - @Override - protected void doXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(NAME); - builder.field(MissingQueryParser.FIELD_FIELD.getPreferredName(), fieldPattern); - builder.field(MissingQueryParser.NULL_VALUE_FIELD.getPreferredName(), nullValue); - builder.field(MissingQueryParser.EXISTENCE_FIELD.getPreferredName(), existence); - printBoostAndQueryName(builder); - builder.endObject(); - } - - @Override - public String getWriteableName() { - return NAME; - } - - @Override - protected Query doToQuery(QueryShardContext context) throws IOException { - return newFilter(context, fieldPattern, existence, nullValue); - } - - public static Query newFilter(QueryShardContext context, String fieldPattern, boolean existence, boolean nullValue) { - if (!existence && !nullValue) { - throw new QueryShardException(context, "missing must have either existence, or null_value, or both set to true"); - } - - final FieldNamesFieldMapper.FieldNamesFieldType fieldNamesFieldType = (FieldNamesFieldMapper.FieldNamesFieldType) context.getMapperService().fullName(FieldNamesFieldMapper.NAME); - if (fieldNamesFieldType == null) { - // can only happen when no types exist, so no docs exist either - return Queries.newMatchNoDocsQuery(); - } - - ObjectMapper objectMapper = context.getObjectMapper(fieldPattern); - if (objectMapper != null) { - // automatic make the object mapper pattern - fieldPattern = fieldPattern + ".*"; - } - - Collection fields = context.simpleMatchToIndexNames(fieldPattern); - if (fields.isEmpty()) { - if (existence) { - // if we ask for existence of fields, and we found none, then we should match on all - return Queries.newMatchAllQuery(); - } - return null; - } - - Query existenceFilter = null; - Query nullFilter = null; - - if (existence) { - BooleanQuery.Builder boolFilter = new BooleanQuery.Builder(); - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - Query filter = null; - if 
(fieldNamesFieldType.isEnabled()) { - final String f; - if (fieldType != null) { - f = fieldType.names().indexName(); - } else { - f = field; - } - filter = fieldNamesFieldType.termQuery(f, context); - } - // if _field_names are not indexed, we need to go the slow way - if (filter == null && fieldType != null) { - filter = fieldType.rangeQuery(null, null, true, true); - } - if (filter == null) { - filter = new TermRangeQuery(field, null, null, true, true); - } - boolFilter.add(filter, BooleanClause.Occur.SHOULD); - } - - existenceFilter = boolFilter.build(); - existenceFilter = Queries.not(existenceFilter);; - } - - if (nullValue) { - for (String field : fields) { - MappedFieldType fieldType = context.fieldMapper(field); - if (fieldType != null) { - nullFilter = fieldType.nullValueQuery(); - } - } - } - - Query filter; - if (nullFilter != null) { - if (existenceFilter != null) { - filter = new BooleanQuery.Builder() - .add(existenceFilter, BooleanClause.Occur.SHOULD) - .add(nullFilter, BooleanClause.Occur.SHOULD) - .build(); - } else { - filter = nullFilter; - } - } else { - filter = existenceFilter; - } - - if (filter == null) { - return null; - } - - return new ConstantScoreQuery(filter); - } - - @Override - protected MissingQueryBuilder doReadFrom(StreamInput in) throws IOException { - return new MissingQueryBuilder(in.readString(), in.readBoolean(), in.readBoolean()); - } - - @Override - protected void doWriteTo(StreamOutput out) throws IOException { - out.writeString(fieldPattern); - out.writeBoolean(nullValue); - out.writeBoolean(existence); - } - - @Override - protected int doHashCode() { - return Objects.hash(fieldPattern, nullValue, existence); - } - - @Override - protected boolean doEquals(MissingQueryBuilder other) { - return Objects.equals(fieldPattern, other.fieldPattern) && - Objects.equals(nullValue, other.nullValue) && - Objects.equals(existence, other.existence); - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java b/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java deleted file mode 100644 index 467971b65ca..00000000000 --- a/core/src/main/java/org/elasticsearch/index/query/MissingQueryParser.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; - -/** - * Parser for missing query - */ -public class MissingQueryParser implements QueryParser { - - public static final ParseField FIELD_FIELD = new ParseField("field"); - public static final ParseField NULL_VALUE_FIELD = new ParseField("null_value"); - public static final ParseField EXISTENCE_FIELD = new ParseField("existence"); - - @Override - public String[] names() { - return new String[]{MissingQueryBuilder.NAME}; - } - - @Override - public MissingQueryBuilder fromXContent(QueryParseContext parseContext) throws IOException { - XContentParser parser = parseContext.parser(); - - String fieldPattern = null; - String queryName = null; - float boost = AbstractQueryBuilder.DEFAULT_BOOST; - boolean nullValue = MissingQueryBuilder.DEFAULT_NULL_VALUE; - boolean existence = MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE; - - XContentParser.Token token; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FIELD_FIELD)) { - fieldPattern = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NULL_VALUE_FIELD)) { - nullValue = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, EXISTENCE_FIELD)) { - existence = parser.booleanValue(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.NAME_FIELD)) { - queryName = parser.text(); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, AbstractQueryBuilder.BOOST_FIELD)) { - boost = parser.floatValue(); - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] query does not support [" + currentFieldName + "]"); - } - } else { - throw new ParsingException(parser.getTokenLocation(), "[" + MissingQueryBuilder.NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); - } - } - - if (fieldPattern == null) { - throw new ParsingException(parser.getTokenLocation(), "missing must be provided with a [field]"); - } - return new MissingQueryBuilder(fieldPattern, nullValue, existence) - .boost(boost) - .queryName(queryName); - } - - @Override - public MissingQueryBuilder getBuilderPrototype() { - return MissingQueryBuilder.PROTOTYPE; - } -} diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 45f97d68c3f..3fb09679204 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -810,27 +810,6 @@ public abstract class QueryBuilders { return new ExistsQueryBuilder(name); } - /** - * A filter to filter only documents where a field does not exists in them. - * @param name the field to query - */ - public static MissingQueryBuilder missingQuery(String name) { - return missingQuery(name, MissingQueryBuilder.DEFAULT_NULL_VALUE, MissingQueryBuilder.DEFAULT_EXISTENCE_VALUE); - } - - /** - * A filter to filter only documents where a field does not exists in them. 
- * @param name the field to query - * @param nullValue should the missing filter automatically include fields with null value configured in the - * mappings. Defaults to false. - * @param existence should the missing filter include documents where the field doesn't exist in the docs. - * Defaults to true. - * @throws IllegalArgumentException when both existence and nullValue are set to false - */ - public static MissingQueryBuilder missingQuery(String name, boolean nullValue, boolean existence) { - return new MissingQueryBuilder(name, nullValue, existence); - } - private QueryBuilders() { } diff --git a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java index 65dfb559e3f..faf482ead94 100644 --- a/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java +++ b/core/src/main/java/org/elasticsearch/index/query/QueryShardContext.java @@ -63,6 +63,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -364,7 +365,7 @@ public class QueryShardContext { * Executes the given template, and returns the response. */ public BytesReference executeQueryTemplate(Template template, SearchContext searchContext) { - ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext); + ExecutableScript executable = getScriptService().executable(template, ScriptContext.Standard.SEARCH, searchContext, Collections.emptyMap()); return (BytesReference) executable.run(); } diff --git a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index f69ac8c0548..6f14f15d3f2 100644 --- a/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.Script.ScriptField; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; +import java.util.Collections; import java.util.Objects; public class ScriptQueryBuilder extends AbstractQueryBuilder { @@ -80,7 +81,7 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder public ScriptQuery(Script script, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; - this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH); + this.searchScript = scriptService.search(searchLookup, script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); } @Override @@ -161,4 +162,4 @@ public class ScriptQueryBuilder extends AbstractQueryBuilder protected boolean doEquals(ScriptQueryBuilder other) { return Objects.equals(script, other.script); } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java index c528c0007f2..e7ce9b90e2b 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java @@ -19,13 +19,11 @@ package org.elasticsearch.index.query.functionscore; -import java.util.Map; - import org.elasticsearch.common.ParsingException; 
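
The missing query is removed outright above (builder, parser, and the QueryBuilders factory methods). The usual replacement, assuming the exists query that this diff leaves in place, is a bool query negating exists; a client-side sketch, not code from this diff:

---------------------------------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

class MissingQueryMigration {
    /** Matches documents where 'field' has no indexed value. */
    static QueryBuilder missing(String field) {
        return QueryBuilders.boolQuery()
                .mustNot(QueryBuilders.existsQuery(field));
    }
}
---------------------------------------------------------------------------

This covers the default existence=true case; the removed null_value flag has no single equivalent and would need the field's null_value mapping plus a term query.
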
import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.XContentLocation; -import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.functionscore.exp.ExponentialDecayFunctionParser; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionParser; import org.elasticsearch.index.query.functionscore.gauss.GaussDecayFunctionParser; @@ -74,11 +72,12 @@ public class ScoreFunctionParserMapper { return functionParsers.get(parserName); } - private static void addParser(ScoreFunctionParser scoreFunctionParser, Map> map, NamedWriteableRegistry namedWriteableRegistry) { + private static void addParser(ScoreFunctionParser scoreFunctionParser, Map> map, NamedWriteableRegistry namedWriteableRegistry) { for (String name : scoreFunctionParser.getNames()) { map.put(name, scoreFunctionParser); } - namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, scoreFunctionParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable sfb = scoreFunctionParser.getBuilderPrototype(); + namedWriteableRegistry.registerPrototype(ScoreFunctionBuilder.class, sfb); } } diff --git a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java index 92308466312..5fcd70b65dc 100644 --- a/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java +++ b/core/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionBuilder.java @@ -33,6 +33,7 @@ import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.SearchScript; import java.io.IOException; +import java.util.Collections; import java.util.Objects; /** @@ -89,10 +90,10 @@ public class ScriptScoreFunctionBuilder extends ScoreFunctionBuilder refreshScheduledFuture; - private volatile ScheduledFuture mergeScheduleFuture; protected volatile ShardRouting shardRouting; protected volatile IndexShardState state; protected final AtomicReference currentEngineReference = new AtomicReference<>(); @@ -766,8 +788,6 @@ public class IndexShard extends AbstractIndexShardComponent { if (state != IndexShardState.CLOSED) { FutureUtils.cancel(refreshScheduledFuture); refreshScheduledFuture = null; - FutureUtils.cancel(mergeScheduleFuture); - mergeScheduleFuture = null; } changeState(IndexShardState.CLOSED, reason); indexShardOperationCounter.decRef(); @@ -1099,7 +1119,8 @@ public class IndexShard extends AbstractIndexShardComponent { // we are the first primary, recover from the gateway // if its post api allocation, the index should exists assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; - final boolean shouldExist = shardRouting.allocatedPostIndexCreate(); + boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData()); + StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); return storeRecovery.recoverFromStore(this, shouldExist, localNode); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index ac46f6725de..88e55600bc9 100644 --- 
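
Several call sites above (QueryShardContext#executeQueryTemplate, ScriptQueryBuilder, ScriptScoreFunctionBuilder) gain a trailing parameter map on ScriptService#executable and #search, passed as Collections.emptyMap() when unused. Updated caller shape, taken from the executeQueryTemplate hunk rather than from ScriptService itself:

---------------------------------------------------------------------------
import java.util.Collections;

// The new trailing argument carries per-invocation script parameters;
// an immutable empty map signals "none" without allocating.
ExecutableScript executable = getScriptService().executable(
        template, ScriptContext.Standard.SEARCH, searchContext, Collections.emptyMap());
BytesReference rendered = (BytesReference) executable.run();
---------------------------------------------------------------------------
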
a/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/core/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -110,7 +110,7 @@ public class TranslogRecoveryPerformer { if (currentUpdate == null) { recoveredTypes.put(type, update); } else { - MapperUtils.merge(currentUpdate, update); + currentUpdate = currentUpdate.merge(update, false); } } diff --git a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java index 1bd023abdb0..ed561876735 100644 --- a/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java +++ b/core/src/main/java/org/elasticsearch/index/store/IndexStoreConfig.java @@ -21,37 +21,36 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.node.settings.NodeSettingsService; /** * IndexStoreConfig encapsulates node / cluster level configuration for index level {@link IndexStore} instances. * For instance it maintains the node level rate limiter configuration: updates to the cluster that disable or enable - * {@value #INDICES_STORE_THROTTLE_TYPE} or {@value #INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC} are reflected immediately + * indices.store.throttle.type or indices.store.throttle.max_bytes_per_sec are reflected immediately * on all referencing {@link IndexStore} instances */ -public class IndexStoreConfig implements NodeSettingsService.Listener { +public class IndexStoreConfig{ /** * Configures the node / cluster level throttle type. See {@link StoreRateLimiting.Type}. */ - public static final String INDICES_STORE_THROTTLE_TYPE = "indices.store.throttle.type"; + public static final Setting INDICES_STORE_THROTTLE_TYPE_SETTING = new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER); /** * Configures the node / cluster level throttle intensity. 
The default is 10240 MB */ - public static final String INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC = "indices.store.throttle.max_bytes_per_sec"; - private volatile String rateLimitingType; + public static final Setting INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0), true, Setting.Scope.CLUSTER); + private volatile StoreRateLimiting.Type rateLimitingType; private volatile ByteSizeValue rateLimitingThrottle; private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); private final ESLogger logger; public IndexStoreConfig(Settings settings) { logger = Loggers.getLogger(IndexStoreConfig.class, settings); // we don't limit by default (we default to CMS's auto throttle instead): - this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name()); + this.rateLimitingType = INDICES_STORE_THROTTLE_TYPE_SETTING.get(settings); rateLimiting.setType(rateLimitingType); - this.rateLimitingThrottle = settings.getAsBytesSize("indices.store.throttle.max_bytes_per_sec", new ByteSizeValue(0)); + this.rateLimitingThrottle = INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.get(settings); rateLimiting.setMaxRate(rateLimitingThrottle); logger.debug("using indices.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); } @@ -63,22 +62,12 @@ public class IndexStoreConfig implements NodeSettingsService.Listener { return rateLimiting; } - @Override - public void onRefreshSettings(Settings settings) { - String rateLimitingType = settings.get(INDICES_STORE_THROTTLE_TYPE, this.rateLimitingType); - // try and parse the type - StoreRateLimiting.Type.fromString(rateLimitingType); - if (!rateLimitingType.equals(this.rateLimitingType)) { - logger.info("updating indices.store.throttle.type from [{}] to [{}]", this.rateLimitingType, rateLimitingType); - this.rateLimitingType = rateLimitingType; - this.rateLimiting.setType(rateLimitingType); - } + public void setRateLimitingType(StoreRateLimiting.Type rateLimitingType) { + this.rateLimitingType = rateLimitingType; + rateLimiting.setType(rateLimitingType); + } - ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, this.rateLimitingThrottle); - if (!rateLimitingThrottle.equals(this.rateLimitingThrottle)) { - logger.info("updating indices.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", this.rateLimitingThrottle, rateLimitingThrottle, this.rateLimitingType); - this.rateLimitingThrottle = rateLimitingThrottle; - this.rateLimiting.setMaxRate(rateLimitingThrottle); - } + public void setRateLimitingThrottle(ByteSizeValue rateLimitingThrottle) { + this.rateLimitingThrottle = rateLimitingThrottle; } } diff --git a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java index 6026468973a..a2eb0bff646 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java +++ b/core/src/main/java/org/elasticsearch/index/translog/BufferingTranslogWriter.java @@ -48,22 +48,27 @@ public final class BufferingTranslogWriter extends TranslogWriter { public Translog.Location add(BytesReference data) throws IOException { try (ReleasableLock lock = writeLock.acquire()) { ensureOpen(); - operationCounter++; final long offset = totalOffset; if (data.length() >= buffer.length) { flush(); // we use the 
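
IndexStoreConfig above is the template for the new typed settings infrastructure: a Setting carries key, default, parser, dynamic flag, and scope, replacing raw string lookups and the NodeSettingsService.Listener callback. Restoring the type parameters that this rendering dropped, the two settings read:

---------------------------------------------------------------------------
// As in the hunk above, with the <...> generics restored. Updates now
// arrive through setRateLimitingType()/setRateLimitingThrottle(),
// registered with the settings machinery outside this excerpt.
public static final Setting<StoreRateLimiting.Type> INDICES_STORE_THROTTLE_TYPE_SETTING =
        new Setting<>("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name(),
                StoreRateLimiting.Type::fromString, true, Setting.Scope.CLUSTER);

public static final Setting<ByteSizeValue> INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING =
        Setting.byteSizeSetting("indices.store.throttle.max_bytes_per_sec",
                new ByteSizeValue(0), true, Setting.Scope.CLUSTER);
---------------------------------------------------------------------------
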
channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel - data.writeTo(channel); + try { + data.writeTo(channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } writtenOffset += data.length(); totalOffset += data.length(); - return new Translog.Location(generation, offset, data.length()); + } else { + if (data.length() > buffer.length - bufferCount) { + flush(); + } + data.writeTo(bufferOs); + totalOffset += data.length(); } - if (data.length() > buffer.length - bufferCount) { - flush(); - } - data.writeTo(bufferOs); - totalOffset += data.length(); + operationCounter++; return new Translog.Location(generation, offset, data.length()); } } @@ -71,10 +76,17 @@ public final class BufferingTranslogWriter extends TranslogWriter { protected final void flush() throws IOException { assert writeLock.isHeldByCurrentThread(); if (bufferCount > 0) { + ensureOpen(); // we use the channel to write, since on windows, writing to the RAF might not be reflected // when reading through the channel - Channels.writeToChannel(buffer, 0, bufferCount, channel); - writtenOffset += bufferCount; + final int bufferSize = bufferCount; + try { + Channels.writeToChannel(buffer, 0, bufferSize, channel); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } + writtenOffset += bufferSize; bufferCount = 0; } } @@ -102,20 +114,28 @@ public final class BufferingTranslogWriter extends TranslogWriter { } @Override - public void sync() throws IOException { - if (!syncNeeded()) { - return; - } - synchronized (this) { + public synchronized void sync() throws IOException { + if (syncNeeded()) { + ensureOpen(); // this call gives a better exception that the incRef if we are closed by a tragic event channelReference.incRef(); try { + final long offsetToSync; + final int opsCounter; try (ReleasableLock lock = writeLock.acquire()) { flush(); - lastSyncedOffset = totalOffset; + offsetToSync = totalOffset; + opsCounter = operationCounter; } // we can do this outside of the write lock but we have to protect from // concurrent syncs - checkpoint(lastSyncedOffset, operationCounter, channelReference); + ensureOpen(); // just for kicks - the checkpoint happens or not either way + try { + checkpoint(offsetToSync, opsCounter, channelReference); + } catch (Throwable ex) { + closeWithTragicEvent(ex); + throw ex; + } + lastSyncedOffset = offsetToSync; } finally { channelReference.decRef(); } diff --git a/core/src/main/java/org/elasticsearch/index/translog/Translog.java b/core/src/main/java/org/elasticsearch/index/translog/Translog.java index 35dd895bc2e..fd5c64f96ac 100644 --- a/core/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/core/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -115,7 +115,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private final Path location; private TranslogWriter current; private volatile ImmutableTranslogReader currentCommittingTranslog; - private long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. + private volatile long lastCommittedTranslogFileGeneration = -1; // -1 is safe as it will not cause an translog deletion. 
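
Every channel write in BufferingTranslogWriter above now runs under the same guard: on any Throwable, record it via closeWithTragicEvent and rethrow, so later operations observe a closed writer rather than silently corrupt state. The guard in isolation; channel and closeWithTragicEvent live in TranslogWriter, outside this excerpt:

---------------------------------------------------------------------------
// Guarded-write pattern from add()/flush() above: poison the writer
// with the root cause before letting the exception propagate.
private void writeGuarded(BytesReference data) throws IOException {
    try {
        data.writeTo(channel);
    } catch (Throwable ex) {
        closeWithTragicEvent(ex); // marks the writer failed, keeps 'ex' as cause
        throw ex;                 // precise rethrow: only IOException escapes here
    }
}
---------------------------------------------------------------------------
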
     private final AtomicBoolean closed = new AtomicBoolean();
     private final TranslogConfig config;
     private final String translogUUID;
@@ -158,7 +158,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         try {
             if (translogGeneration != null) {
-                final Checkpoint checkpoint = Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME));
+                final Checkpoint checkpoint = readCheckpoint();
                 this.recoveredTranslogs = recoverFromFiles(translogGeneration, checkpoint);
                 if (recoveredTranslogs.isEmpty()) {
                     throw new IllegalStateException("at least one reader must be recovered");
@@ -279,7 +279,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         }
     }
-    boolean isOpen() {
+    /** Returns {@code true} if this {@code Translog} is still open. */
+    public boolean isOpen() {
         return closed.get() == false;
     }
@@ -288,10 +289,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         if (closed.compareAndSet(false, true)) {
             try (ReleasableLock lock = writeLock.acquire()) {
                 try {
-                    IOUtils.close(current, currentCommittingTranslog);
+                    current.sync();
                 } finally {
-                    IOUtils.close(recoveredTranslogs);
-                    recoveredTranslogs.clear();
+                    try {
+                        IOUtils.close(current, currentCommittingTranslog);
+                    } finally {
+                        IOUtils.close(recoveredTranslogs);
+                        recoveredTranslogs.clear();
+                    }
                 }
             } finally {
                 FutureUtils.cancel(syncScheduler);
@@ -354,7 +359,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     TranslogWriter createWriter(long fileGeneration) throws IOException {
         TranslogWriter newFile;
         try {
-            newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize());
+            newFile = TranslogWriter.create(config.getType(), shardId, translogUUID, fileGeneration, location.resolve(getFilename(fileGeneration)), new OnCloseRunnable(), config.getBufferSize(), getChannelFactory());
         } catch (IOException e) {
             throw new TranslogException(shardId, "failed to create new translog file", e);
         }
@@ -393,7 +398,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
      * @see Index
      * @see org.elasticsearch.index.translog.Translog.Delete
      */
-    public Location add(Operation operation) throws TranslogException {
+    public Location add(Operation operation) throws IOException {
         final ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(bigArrays);
         try {
             final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(out);
@@ -415,9 +420,11 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                 assert current.assertBytesAtLocation(location, bytes);
                 return location;
             }
-        } catch (AlreadyClosedException ex) {
+        } catch (AlreadyClosedException | IOException ex) {
+            closeOnTragicEvent(ex);
             throw ex;
         } catch (Throwable e) {
+            closeOnTragicEvent(e);
             throw new TranslogException(shardId, "Failed to write operation [" + operation + "]", e);
         } finally {
             Releasables.close(out.bytes());
         }
     }
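Because add(Operation) now throws IOException and invokes closeOnTragicEvent, callers see a new contract: after such a failure the translog has already closed itself and retained the root cause. A hypothetical caller illustrating this, assuming placeholder names (operation, failShard) for whatever engine-side handling applies:

    try {
        Translog.Location location = translog.add(operation);
    } catch (AlreadyClosedException | IOException ex) {
        // if a tragic event occurred, the translog closed itself and preserved
        // the root cause exactly for this kind of handling:
        Throwable tragedy = translog.getTragicException();
        if (tragedy != null) {
            failShard("translog writer failed", tragedy); // placeholder reaction
        }
        throw ex;
    }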
@@ -429,6 +436,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
      * Snapshots are fixed in time and will not be updated with future operations.
      */
     public Snapshot newSnapshot() {
+        ensureOpen();
         try (ReleasableLock lock = readLock.acquire()) {
             ArrayList<TranslogReader> toOpen = new ArrayList<>();
             toOpen.addAll(recoveredTranslogs);
@@ -493,6 +501,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             if (closed.get() == false) {
                 current.sync();
             }
+        } catch (Throwable ex) {
+            closeOnTragicEvent(ex);
+            throw ex;
         }
     }
@@ -520,12 +531,26 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     public boolean ensureSynced(Location location) throws IOException {
         try (ReleasableLock lock = readLock.acquire()) {
             if (location.generation == current.generation) { // if we have a new one it's already synced
+                ensureOpen();
                 return current.syncUpTo(location.translogLocation + location.size);
             }
+        } catch (Throwable ex) {
+            closeOnTragicEvent(ex);
+            throw ex;
         }
         return false;
     }
+    private void closeOnTragicEvent(Throwable ex) {
+        if (current.getTragicException() != null) {
+            try {
+                close();
+            } catch (Exception inner) {
+                ex.addSuppressed(inner);
+            }
+        }
+    }
+
     /**
      * return stats
      */
@@ -548,31 +573,29 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     private final class OnCloseRunnable implements Callback<ChannelReference> {
         @Override
         public void handle(ChannelReference channelReference) {
-            try (ReleasableLock lock = writeLock.acquire()) {
-                if (isReferencedGeneration(channelReference.getGeneration()) == false) {
-                    Path translogPath = channelReference.getPath();
-                    assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
-                    // if the given translogPath is not the current we can safely delete the file since all references are released
-                    logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
-                    IOUtils.deleteFilesIgnoringExceptions(translogPath);
-                    IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
+            if (isReferencedGeneration(channelReference.getGeneration()) == false) {
+                Path translogPath = channelReference.getPath();
+                assert channelReference.getPath().getParent().equals(location) : "translog files must be in the location folder: " + location + " but was: " + translogPath;
+                // if the given translogPath is not the current we can safely delete the file since all references are released
+                logger.trace("delete translog file - not referenced and not current anymore {}", translogPath);
+                IOUtils.deleteFilesIgnoringExceptions(translogPath);
+                IOUtils.deleteFilesIgnoringExceptions(translogPath.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
-                }
-                try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
-                    for (Path path : stream) {
-                        Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
-                        if (matcher.matches()) {
-                            long generation = Long.parseLong(matcher.group(1));
-                            if (isReferencedGeneration(generation) == false) {
-                                logger.trace("delete translog file - not referenced and not current anymore {}", path);
-                                IOUtils.deleteFilesIgnoringExceptions(path);
-                                IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
-                            }
+            }
+            try (DirectoryStream<Path> stream = Files.newDirectoryStream(location)) {
+                for (Path path : stream) {
+                    Matcher matcher = PARSE_STRICT_ID_PATTERN.matcher(path.getFileName().toString());
+                    if (matcher.matches()) {
+                        long generation = Long.parseLong(matcher.group(1));
+                        if (isReferencedGeneration(generation) == false) {
+                            logger.trace("delete translog file - not referenced and not current anymore {}", path);
+                            IOUtils.deleteFilesIgnoringExceptions(path);
+                            IOUtils.deleteFilesIgnoringExceptions(path.resolveSibling(getCommitCheckpointFileName(channelReference.getGeneration())));
                         }
                     }
-                } catch (IOException e) {
-                    logger.warn("failed to delete unreferenced translog files", e);
                 }
+            } catch (IOException e) {
+                logger.warn("failed to delete unreferenced translog files", e);
             }
         }
     }
@@ -1294,6 +1317,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             throw new IllegalStateException("already committing a translog with generation: " + currentCommittingTranslog.getGeneration());
         }
         final TranslogWriter oldCurrent = current;
+        oldCurrent.ensureOpen();
         oldCurrent.sync();
         currentCommittingTranslog = current.immutableReader();
         Path checkpoint = location.resolve(CHECKPOINT_FILE_NAME);
@@ -1389,7 +1413,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     private void ensureOpen() {
         if (closed.get()) {
-            throw new AlreadyClosedException("translog is already closed");
+            throw new AlreadyClosedException("translog is already closed", current.getTragicException());
         }
     }
@@ -1400,4 +1424,20 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
         return outstandingViews.size();
     }
+    TranslogWriter.ChannelFactory getChannelFactory() {
+        return TranslogWriter.ChannelFactory.DEFAULT;
+    }
+
+    /** If this {@code Translog} was closed as a side-effect of a tragic exception,
+     *  e.g. disk full while flushing a new segment, this returns the root cause exception.
+     *  Otherwise (no tragic exception has occurred) it returns null. */
+    public Throwable getTragicException() {
+        return current.getTragicException();
+    }
+
+    /** Reads and returns the current checkpoint */
+    final Checkpoint readCheckpoint() throws IOException {
+        return Checkpoint.read(location.resolve(CHECKPOINT_FILE_NAME));
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
index 590bc319057..d7077fd90ad 100644
--- a/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
+++ b/core/src/main/java/org/elasticsearch/index/translog/TranslogReader.java
@@ -140,16 +140,16 @@ public abstract class TranslogReader implements Closeable, Comparable onClose, int bufferSize) throws IOException {
+    public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize, ChannelFactory channelFactory) throws IOException {
         final BytesRef ref = new BytesRef(translogUUID);
         final int headerLength = CodecUtil.headerLength(TRANSLOG_CODEC) + ref.length + RamUsageEstimator.NUM_BYTES_INT;
-        final FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
+        final FileChannel channel = channelFactory.open(file);
         try {
             // This OutputStreamDataOutput is intentionally not closed because
             // closing it will close the FileChannel
@@ -90,6 +94,12 @@
             throw throwable;
         }
     }
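getChannelFactory() above, together with TranslogWriter.ChannelFactory below, is a test seam: production code uses ChannelFactory.DEFAULT, while a test can hand out channels that fail on demand (until a disk-full FileSystem exists, as the comment below notes). A sketch of such an override; FailingFileChannel is a hypothetical wrapper and the Translog constructor arguments are assumed:

    Translog translog = new Translog(translogConfig) {
        @Override
        TranslogWriter.ChannelFactory getChannelFactory() {
            return new TranslogWriter.ChannelFactory() {
                @Override
                public FileChannel open(Path file) throws IOException {
                    // delegate to the default open, but wrap the channel so the
                    // test can trigger IOExceptions at a chosen moment
                    return new FailingFileChannel(super.open(file)); // hypothetical wrapper
                }
            };
        }
    };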
+    /** If this {@code TranslogWriter} was closed as a side-effect of a tragic exception,
+     *  e.g. disk full while flushing a new segment, this returns the root cause exception.
+     *  Otherwise (no tragic exception has occurred) it returns null. */
+    public Throwable getTragicException() {
+        return tragedy;
+    }
     public enum Type {
@@ -118,6 +128,16 @@
         }
     }
+    protected final void closeWithTragicEvent(Throwable throwable) throws IOException {
+        try (ReleasableLock lock = writeLock.acquire()) {
+            if (tragedy == null) {
+                tragedy = throwable;
+            } else {
+                tragedy.addSuppressed(throwable);
+            }
+            close();
+        }
+    }
     /**
      * add the given bytes to the translog and return the location they were written at
      */
@@ -127,9 +147,14 @@
         try (ReleasableLock lock = writeLock.acquire()) {
             ensureOpen();
             position = writtenOffset;
-            data.writeTo(channel);
+            try {
+                data.writeTo(channel);
+            } catch (Throwable e) {
+                closeWithTragicEvent(e);
+                throw e;
+            }
             writtenOffset = writtenOffset + data.length();
-            operationCounter = operationCounter + 1;
+            operationCounter++;
         }
         return new Translog.Location(generation, position, data.length());
     }
@@ -143,12 +168,13 @@
     /**
      * write all buffered ops to disk and fsync file
      */
-    public void sync() throws IOException {
+    public synchronized void sync() throws IOException { // synchronized to ensure only one sync happens at a time
        // check if we really need to sync here...
        if (syncNeeded()) {
            try (ReleasableLock lock = writeLock.acquire()) {
+                ensureOpen();
+                checkpoint(writtenOffset, operationCounter, channelReference);
                 lastSyncedOffset = writtenOffset;
-                checkpoint(lastSyncedOffset, operationCounter, channelReference);
             }
         }
     }
@@ -262,15 +288,6 @@
         return false;
     }
-    @Override
-    protected final void doClose() throws IOException {
-        try (ReleasableLock lock = writeLock.acquire()) {
-            sync();
-        } finally {
-            super.doClose();
-        }
-    }
-
     @Override
     protected void readBytes(ByteBuffer buffer, long position) throws IOException {
         try (ReleasableLock lock = readLock.acquire()) {
@@ -288,4 +305,20 @@
         Checkpoint checkpoint = new Checkpoint(syncPosition, numOperations, generation);
         Checkpoint.write(checkpointFile, checkpoint, options);
     }
+
+    static class ChannelFactory {
+
+        static final ChannelFactory DEFAULT = new ChannelFactory();
+
+        // only for testing until we have a disk-full FileSystem
+        public FileChannel open(Path file) throws IOException {
+            return FileChannel.open(file, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
+        }
+    }
+
+    protected final void ensureOpen() {
+        if (isClosed()) {
+            throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed", tragedy);
+        }
+    }
 }
diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
index cdd7f050331..61210bb0413 100644
--- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
+++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java
@@ -22,18 +22,86 @@ package org.elasticsearch.indices;
 import org.elasticsearch.action.update.UpdateHelper;
 import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
 import org.elasticsearch.common.geo.ShapesAvailability;
+import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.util.ExtensionPoint;
 import org.elasticsearch.index.NodeServicesProvider;
 import org.elasticsearch.index.mapper.Mapper;
 import
org.elasticsearch.index.mapper.MetadataFieldMapper; -import org.elasticsearch.index.mapper.core.*; +import org.elasticsearch.index.mapper.core.BinaryFieldMapper; +import org.elasticsearch.index.mapper.core.BooleanFieldMapper; +import org.elasticsearch.index.mapper.core.ByteFieldMapper; +import org.elasticsearch.index.mapper.core.CompletionFieldMapper; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.index.mapper.core.DoubleFieldMapper; +import org.elasticsearch.index.mapper.core.FloatFieldMapper; +import org.elasticsearch.index.mapper.core.IntegerFieldMapper; +import org.elasticsearch.index.mapper.core.LongFieldMapper; +import org.elasticsearch.index.mapper.core.ShortFieldMapper; +import org.elasticsearch.index.mapper.core.StringFieldMapper; +import org.elasticsearch.index.mapper.core.TokenCountFieldMapper; +import org.elasticsearch.index.mapper.core.TypeParsers; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; import org.elasticsearch.index.mapper.geo.GeoShapeFieldMapper; -import org.elasticsearch.index.mapper.internal.*; +import org.elasticsearch.index.mapper.internal.AllFieldMapper; +import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; +import org.elasticsearch.index.mapper.internal.IdFieldMapper; +import org.elasticsearch.index.mapper.internal.IndexFieldMapper; +import org.elasticsearch.index.mapper.internal.ParentFieldMapper; +import org.elasticsearch.index.mapper.internal.RoutingFieldMapper; +import org.elasticsearch.index.mapper.internal.SourceFieldMapper; +import org.elasticsearch.index.mapper.internal.TTLFieldMapper; +import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.ip.IpFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.query.*; +import org.elasticsearch.index.query.BoolQueryParser; +import org.elasticsearch.index.query.BoostingQueryParser; +import org.elasticsearch.index.query.CommonTermsQueryParser; +import org.elasticsearch.index.query.ConstantScoreQueryParser; +import org.elasticsearch.index.query.DisMaxQueryParser; +import org.elasticsearch.index.query.ExistsQueryParser; +import org.elasticsearch.index.query.FieldMaskingSpanQueryParser; +import org.elasticsearch.index.query.FuzzyQueryParser; +import org.elasticsearch.index.query.GeoBoundingBoxQueryParser; +import org.elasticsearch.index.query.GeoDistanceQueryParser; +import org.elasticsearch.index.query.GeoDistanceRangeQueryParser; +import org.elasticsearch.index.query.GeoPolygonQueryParser; +import org.elasticsearch.index.query.GeoShapeQueryParser; +import org.elasticsearch.index.query.GeohashCellQuery; +import org.elasticsearch.index.query.HasChildQueryParser; +import org.elasticsearch.index.query.HasParentQueryParser; +import org.elasticsearch.index.query.IdsQueryParser; +import org.elasticsearch.index.query.IndicesQueryParser; +import org.elasticsearch.index.query.MatchAllQueryParser; +import org.elasticsearch.index.query.MatchNoneQueryParser; +import org.elasticsearch.index.query.MatchQueryParser; +import org.elasticsearch.index.query.MoreLikeThisQueryParser; +import org.elasticsearch.index.query.MultiMatchQueryParser; +import org.elasticsearch.index.query.NestedQueryParser; +import org.elasticsearch.index.query.PrefixQueryParser; +import 
org.elasticsearch.index.query.QueryParser; +import org.elasticsearch.index.query.QueryStringQueryParser; +import org.elasticsearch.index.query.RangeQueryParser; +import org.elasticsearch.index.query.RegexpQueryParser; +import org.elasticsearch.index.query.ScriptQueryParser; +import org.elasticsearch.index.query.SimpleQueryStringParser; +import org.elasticsearch.index.query.SpanContainingQueryParser; +import org.elasticsearch.index.query.SpanFirstQueryParser; +import org.elasticsearch.index.query.SpanMultiTermQueryParser; +import org.elasticsearch.index.query.SpanNearQueryParser; +import org.elasticsearch.index.query.SpanNotQueryParser; +import org.elasticsearch.index.query.SpanOrQueryParser; +import org.elasticsearch.index.query.SpanTermQueryParser; +import org.elasticsearch.index.query.SpanWithinQueryParser; +import org.elasticsearch.index.query.TemplateQueryParser; +import org.elasticsearch.index.query.TermQueryParser; +import org.elasticsearch.index.query.TermsQueryParser; +import org.elasticsearch.index.query.TypeQueryParser; +import org.elasticsearch.index.query.WildcardQueryParser; +import org.elasticsearch.index.query.WrapperQueryParser; import org.elasticsearch.index.query.functionscore.FunctionScoreQueryParser; import org.elasticsearch.index.termvectors.TermVectorsService; import org.elasticsearch.indices.cache.query.IndicesQueryCache; @@ -120,10 +188,9 @@ public class IndicesModule extends AbstractModule { registerQueryParser(GeohashCellQuery.Parser.class); registerQueryParser(GeoPolygonQueryParser.class); registerQueryParser(ExistsQueryParser.class); - registerQueryParser(MissingQueryParser.class); registerQueryParser(MatchNoneQueryParser.class); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQueryParser(GeoShapeQueryParser.class); } } @@ -147,7 +214,7 @@ public class IndicesModule extends AbstractModule { registerMapper(CompletionFieldMapper.CONTENT_TYPE, new CompletionFieldMapper.TypeParser()); registerMapper(GeoPointFieldMapper.CONTENT_TYPE, new GeoPointFieldMapper.TypeParser()); - if (ShapesAvailability.JTS_AVAILABLE) { + if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerMapper(GeoShapeFieldMapper.CONTENT_TYPE, new GeoShapeFieldMapper.TypeParser()); } } @@ -219,6 +286,7 @@ public class IndicesModule extends AbstractModule { bind(IndicesFieldDataCacheListener.class).asEagerSingleton(); bind(TermVectorsService.class).asEagerSingleton(); bind(NodeServicesProvider.class).asEagerSingleton(); + bind(ShapeBuilderRegistry.class).asEagerSingleton(); } // public for testing diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesService.java b/core/src/main/java/org/elasticsearch/indices/IndicesService.java index dead72aee8b..d8c142f478d 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.FileSystemUtils; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -58,7 +59,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.IndexStoreConfig; import 
org.elasticsearch.indices.mapper.MapperRegistry;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.plugins.PluginsService;
 import java.io.IOException;
@@ -100,9 +100,9 @@ public class IndicesService extends AbstractLifecycleComponent i
     @Inject
     public IndicesService(Settings settings, PluginsService pluginsService, NodeEnvironment nodeEnv,
-                          NodeSettingsService nodeSettingsService, AnalysisRegistry analysisRegistry,
-                          IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
-                          ClusterService clusterService, MapperRegistry mapperRegistry) {
+                          ClusterSettings clusterSettings, AnalysisRegistry analysisRegistry,
+                          IndicesQueriesRegistry indicesQueriesRegistry, IndexNameExpressionResolver indexNameExpressionResolver,
+                          ClusterService clusterService, MapperRegistry mapperRegistry) {
         super(settings);
         this.pluginsService = pluginsService;
         this.nodeEnv = nodeEnv;
@@ -113,7 +113,9 @@ public class IndicesService extends AbstractLifecycleComponent i
         this.clusterService = clusterService;
         this.indexNameExpressionResolver = indexNameExpressionResolver;
         this.mapperRegistry = mapperRegistry;
-        nodeSettingsService.addListener(indexStoreConfig);
+        clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING, indexStoreConfig::setRateLimitingType);
+        clusterSettings.addSettingsUpdateConsumer(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING, indexStoreConfig::setRateLimitingThrottle);
+
     }
     @Override
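IndicesService above registers one consumer per setting; the HierarchyCircuitBreakerService diff below uses two further variants of the same API: a single callback fed by two related settings at once, and a consumer paired with a validator that can veto a value before it is applied. Sketched with the setting names from the hunks below (service stands in for the breaker service instance, and the handler methods are assumed to be reachable from the registration site):

    // Variant 1: one update callback that atomically receives both parsed values.
    clusterSettings.addSettingsUpdateConsumer(
            HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
            HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING,
            (limit, overhead) -> service.setRequestBreakerLimit(limit, overhead));

    // Variant 2: the validator runs first; if it rejects the new value, the
    // consumer is never invoked and the update fails.
    clusterSettings.addSettingsUpdateConsumer(
            HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING,
            service::setTotalCircuitBreakerLimit,
            service::validateTotalCircuitBreakerLimit);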
diff --git a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
index 33f3c127d67..0e1532bc6b3 100644
--- a/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
+++ b/core/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java
@@ -25,9 +25,10 @@
 import org.elasticsearch.common.breaker.CircuitBreakingException;
 import org.elasticsearch.common.breaker.NoopCircuitBreaker;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import java.util.ArrayList;
 import java.util.List;
@@ -45,25 +46,17 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService {
     private final ConcurrentMap<String, CircuitBreaker> breakers = new ConcurrentHashMap<>();
-    // Old pre-1.4.0 backwards compatible settings
-    public static final String OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING = "indices.fielddata.breaker.limit";
-    public static final String OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.fielddata.breaker.overhead";
+    public static final Setting<ByteSizeValue> TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.total.limit", "70%", true, Setting.Scope.CLUSTER);
-    public static final String TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.total.limit";
-    public static final String DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT = "70%";
+    public static final Setting<ByteSizeValue> FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.fielddata.limit", "60%", true, Setting.Scope.CLUSTER);
+    public static final Setting<Double> FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.fielddata.overhead", 1.03d, 0.0d, true, Setting.Scope.CLUSTER);
+    public static final Setting<CircuitBreaker.Type> FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.fielddata.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
-    public static final String FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.fielddata.limit";
-    public static final String FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.fielddata.overhead";
-    public static final String FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.fielddata.type";
-    public static final String DEFAULT_FIELDDATA_BREAKER_LIMIT = "60%";
-    public static final double DEFAULT_FIELDDATA_OVERHEAD_CONSTANT = 1.03;
+    public static final Setting<ByteSizeValue> REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = Setting.byteSizeSetting("indices.breaker.request.limit", "40%", true, Setting.Scope.CLUSTER);
+    public static final Setting<Double> REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = Setting.doubleSetting("indices.breaker.request.overhead", 1.0d, 0.0d, true, Setting.Scope.CLUSTER);
+    public static final Setting<CircuitBreaker.Type> REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = new Setting<>("indices.breaker.request.type", "memory", CircuitBreaker.Type::parseValue, false, Setting.Scope.CLUSTER);
-    public static final String REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING = "indices.breaker.request.limit";
-    public static final String REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING = "indices.breaker.request.overhead";
-    public static final String REQUEST_CIRCUIT_BREAKER_TYPE_SETTING = "indices.breaker.request.type";
-    public static final String DEFAULT_REQUEST_BREAKER_LIMIT = "40%";
-    public static final String DEFAULT_BREAKER_TYPE = "memory";
     private volatile BreakerSettings parentSettings;
     private volatile BreakerSettings fielddataSettings;
@@ -73,41 +66,21 @@
     private final AtomicLong parentTripCount = new AtomicLong(0);
     @Inject
-    public HierarchyCircuitBreakerService(Settings settings, NodeSettingsService nodeSettingsService) {
+    public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
-        // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_MAX_BYTES_SETTING
-        // setting to keep backwards compatibility with 1.3, it can be safely
-        // removed when compatibility with 1.3 is no longer needed
-        String compatibilityFielddataLimitDefault = DEFAULT_FIELDDATA_BREAKER_LIMIT;
-        ByteSizeValue compatibilityFielddataLimit = settings.getAsMemory(OLD_CIRCUIT_BREAKER_MAX_BYTES_SETTING, null);
-        if (compatibilityFielddataLimit != null) {
-            compatibilityFielddataLimitDefault = compatibilityFielddataLimit.toString();
-        }
-
-        // This uses the old InternalCircuitBreakerService.CIRCUIT_BREAKER_OVERHEAD_SETTING
-        // setting to keep backwards compatibility with 1.3, it can be safely
-        // removed when compatibility with 1.3 is no longer needed
-        double compatibilityFielddataOverheadDefault = DEFAULT_FIELDDATA_OVERHEAD_CONSTANT;
-        Double compatibilityFielddataOverhead = settings.getAsDouble(OLD_CIRCUIT_BREAKER_OVERHEAD_SETTING, null);
-        if (compatibilityFielddataOverhead != null) {
-            compatibilityFielddataOverheadDefault = compatibilityFielddataOverhead;
-        }
-
-        this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
-            settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, compatibilityFielddataLimitDefault).bytes(), -
settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, compatibilityFielddataOverheadDefault), - CircuitBreaker.Type.parseValue(settings.get(FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) + FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), + FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) ); this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST, - settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_REQUEST_BREAKER_LIMIT).bytes(), - settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0), - CircuitBreaker.Type.parseValue(settings.get(REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, DEFAULT_BREAKER_TYPE)) + REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), + REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings), + REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings) ); - this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, - settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, DEFAULT_TOTAL_CIRCUIT_BREAKER_LIMIT).bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT, TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).bytes(), 1.0, CircuitBreaker.Type.PARENT); if (logger.isTraceEnabled()) { logger.trace("parent circuit breaker with settings {}", this.parentSettings); } @@ -115,52 +88,38 @@ public class HierarchyCircuitBreakerService extends CircuitBreakerService { registerBreaker(this.requestSettings); registerBreaker(this.fielddataSettings); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, this::setTotalCircuitBreakerLimit, this::validateTotalCircuitBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setFieldDataBreakerLimit); + clusterSettings.addSettingsUpdateConsumer(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, this::setRequestBreakerLimit); + } + private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) { + BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.bytes(), newRequestOverhead, + HierarchyCircuitBreakerService.this.requestSettings.getType()); + registerBreaker(newRequestSettings); + HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; + logger.info("Updated breaker settings request: {}", newRequestSettings); } - public class ApplySettings implements NodeSettingsService.Listener { + private void setFieldDataBreakerLimit(ByteSizeValue newFielddataMax, Double newFielddataOverhead) { + long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); + newFielddataOverhead = newFielddataOverhead == null ? 
HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, + HierarchyCircuitBreakerService.this.fielddataSettings.getType()); + registerBreaker(newFielddataSettings); + HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; + logger.info("Updated breaker settings field data: {}", newFielddataSettings); - @Override - public void onRefreshSettings(Settings settings) { + } - // Fielddata settings - ByteSizeValue newFielddataMax = settings.getAsMemory(FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newFielddataOverhead = settings.getAsDouble(FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newFielddataMax != null || newFielddataOverhead != null) { - long newFielddataLimitBytes = newFielddataMax == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getLimit() : newFielddataMax.bytes(); - newFielddataOverhead = newFielddataOverhead == null ? HierarchyCircuitBreakerService.this.fielddataSettings.getOverhead() : newFielddataOverhead; + private boolean validateTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + validateSettings(new BreakerSettings[]{newParentSettings}); + return true; + } - BreakerSettings newFielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA, newFielddataLimitBytes, newFielddataOverhead, - HierarchyCircuitBreakerService.this.fielddataSettings.getType()); - registerBreaker(newFielddataSettings); - HierarchyCircuitBreakerService.this.fielddataSettings = newFielddataSettings; - logger.info("Updated breaker settings fielddata: {}", newFielddataSettings); - } - - // Request settings - ByteSizeValue newRequestMax = settings.getAsMemory(REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, null); - Double newRequestOverhead = settings.getAsDouble(REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, null); - if (newRequestMax != null || newRequestOverhead != null) { - long newRequestLimitBytes = newRequestMax == null ? HierarchyCircuitBreakerService.this.requestSettings.getLimit() : newRequestMax.bytes(); - newRequestOverhead = newRequestOverhead == null ? 
HierarchyCircuitBreakerService.this.requestSettings.getOverhead() : newRequestOverhead; - - BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestLimitBytes, newRequestOverhead, - HierarchyCircuitBreakerService.this.requestSettings.getType()); - registerBreaker(newRequestSettings); - HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings; - logger.info("Updated breaker settings request: {}", newRequestSettings); - } - - // Parent settings - long oldParentMax = HierarchyCircuitBreakerService.this.parentSettings.getLimit(); - ByteSizeValue newParentMax = settings.getAsMemory(TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, null); - if (newParentMax != null && (newParentMax.bytes() != oldParentMax)) { - BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, newParentMax.bytes(), 1.0, CircuitBreaker.Type.PARENT); - validateSettings(new BreakerSettings[]{newParentSettings}); - HierarchyCircuitBreakerService.this.parentSettings = newParentSettings; - logger.info("Updated breaker settings parent: {}", newParentSettings); - } - } + private void setTotalCircuitBreakerLimit(ByteSizeValue byteSizeValue) { + BreakerSettings newParentSettings = new BreakerSettings(CircuitBreaker.PARENT, byteSizeValue.bytes(), 1.0, CircuitBreaker.Type.PARENT); + this.parentSettings = newParentSettings; } /** diff --git a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java index f7ae5f94b96..220ce9120e9 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/ShardsSyncedFlushResult.java @@ -19,8 +19,12 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.index.shard.ShardId; +import java.io.IOException; import java.util.HashMap; import java.util.Map; @@ -30,15 +34,15 @@ import static java.util.Collections.unmodifiableMap; /** * Result for all copies of a shard */ -public class ShardsSyncedFlushResult { +public class ShardsSyncedFlushResult implements Streamable { private String failureReason; - private Map shardResponses; + private Map shardResponses; private String syncId; private ShardId shardId; // some shards may be unassigned, so we need this as state private int totalShards; - public ShardsSyncedFlushResult() { + private ShardsSyncedFlushResult() { } public ShardId getShardId() { @@ -59,7 +63,7 @@ public class ShardsSyncedFlushResult { /** * success constructor */ - public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map shardResponses) { + public ShardsSyncedFlushResult(ShardId shardId, String syncId, int totalShards, Map shardResponses) { this.failureReason = null; this.shardResponses = unmodifiableMap(new HashMap<>(shardResponses)); this.syncId = syncId; @@ -98,7 +102,7 @@ public class ShardsSyncedFlushResult { */ public int successfulShards() { int i = 0; - for (SyncedFlushService.SyncedFlushResponse result : shardResponses.values()) { + for (SyncedFlushService.ShardSyncedFlushResponse result : shardResponses.values()) { if (result.success()) { i++; } @@ -109,9 +113,9 @@ public class ShardsSyncedFlushResult { /** * @return an array of shard failures */ - public Map failedShards() { - 
Map failures = new HashMap<>(); - for (Map.Entry result : shardResponses.entrySet()) { + public Map failedShards() { + Map failures = new HashMap<>(); + for (Map.Entry result : shardResponses.entrySet()) { if (result.getValue().success() == false) { failures.put(result.getKey(), result.getValue()); } @@ -123,11 +127,45 @@ public class ShardsSyncedFlushResult { * @return Individual responses for each shard copy with a detailed failure message if the copy failed to perform the synced flush. * Empty if synced flush failed before step three. */ - public Map shardResponses() { + public Map shardResponses() { return shardResponses; } public ShardId shardId() { return shardId; } + + @Override + public void readFrom(StreamInput in) throws IOException { + failureReason = in.readOptionalString(); + int numResponses = in.readInt(); + shardResponses = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + ShardRouting shardRouting = ShardRouting.readShardRoutingEntry(in); + SyncedFlushService.ShardSyncedFlushResponse response = SyncedFlushService.ShardSyncedFlushResponse.readSyncedFlushResponse(in); + shardResponses.put(shardRouting, response); + } + syncId = in.readOptionalString(); + shardId = ShardId.readShardId(in); + totalShards = in.readInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalString(failureReason); + out.writeInt(shardResponses.size()); + for (Map.Entry entry : shardResponses.entrySet()) { + entry.getKey().writeTo(out); + entry.getValue().writeTo(out); + } + out.writeOptionalString(syncId); + shardId.writeTo(out); + out.writeInt(totalShards); + } + + public static ShardsSyncedFlushResult readShardsSyncedFlushResult(StreamInput in) throws IOException { + ShardsSyncedFlushResult shardsSyncedFlushResult = new ShardsSyncedFlushResult(); + shardsSyncedFlushResult.readFrom(in); + return shardsSyncedFlushResult; + } } diff --git a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index ad264c2ac05..0918ad2afee 100644 --- a/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/core/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.flush.FlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -81,9 +82,8 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL this.clusterService = clusterService; this.transportService = transportService; this.indexNameExpressionResolver = indexNameExpressionResolver; - - transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); - transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest::new, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreShardSyncedFlushRequest::new, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); + transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, ShardSyncedFlushRequest::new, 
ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest::new, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler()); } @@ -109,7 +109,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL * a utility method to perform a synced flush for all shards of multiple indices. see {@link #attemptSyncedFlush(ShardId, ActionListener)} * for more details. */ - public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { + public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener listener) { final ClusterState state = clusterService.state(); final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices); final Map> results = ConcurrentCollections.newConcurrentMap(); @@ -123,7 +123,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } if (numberOfShards == 0) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); return; } final int finalTotalNumberOfShards = totalNumberOfShards; @@ -138,7 +138,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL public void onResponse(ShardsSyncedFlushResult syncedFlushResult) { results.get(index).add(syncedFlushResult); if (countDown.countDown()) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } @@ -147,7 +147,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL logger.debug("{} unexpected error while executing synced flush", shardId); results.get(index).add(new ShardsSyncedFlushResult(shardId, finalTotalNumberOfShards, e.getMessage())); if (countDown.countDown()) { - listener.onResponse(new IndicesSyncedFlushResult(results)); + listener.onResponse(new SyncedFlushResponse(results)); } } }); @@ -297,33 +297,33 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL void sendSyncRequests(final String syncId, final List shards, ClusterState state, Map expectedCommitIds, final ShardId shardId, final int totalShards, final ActionListener listener) { final CountDown countDown = new CountDown(shards.size()); - final Map results = ConcurrentCollections.newConcurrentMap(); + final Map results = ConcurrentCollections.newConcurrentMap(); for (final ShardRouting shard : shards) { final DiscoveryNode node = state.nodes().get(shard.currentNodeId()); if (node == null) { logger.trace("{} is assigned to an unknown node. skipping for sync id [{}]. shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("unknown node")); + results.put(shard, new ShardSyncedFlushResponse("unknown node")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } final Engine.CommitId expectedCommitId = expectedCommitIds.get(shard.currentNodeId()); if (expectedCommitId == null) { logger.trace("{} can't resolve expected commit id for {}, skipping for sync id [{}]. 
shard routing {}", shardId, syncId, shard); - results.put(shard, new SyncedFlushResponse("no commit id from pre-sync flush")); + results.put(shard, new ShardSyncedFlushResponse("no commit id from pre-sync flush")); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); continue; } logger.trace("{} sending synced flush request to {}. sync id [{}].", shardId, shard, syncId); - transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new SyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), - new BaseTransportResponseHandler() { + transportService.sendRequest(node, SYNCED_FLUSH_ACTION_NAME, new ShardSyncedFlushRequest(shard.shardId(), syncId, expectedCommitId), + new BaseTransportResponseHandler() { @Override - public SyncedFlushResponse newInstance() { - return new SyncedFlushResponse(); + public ShardSyncedFlushResponse newInstance() { + return new ShardSyncedFlushResponse(); } @Override - public void handleResponse(SyncedFlushResponse response) { - SyncedFlushResponse existing = results.put(shard, response); + public void handleResponse(ShardSyncedFlushResponse response) { + ShardSyncedFlushResponse existing = results.put(shard, response); assert existing == null : "got two answers for node [" + node + "]"; // count after the assert so we won't decrement twice in handleException contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); @@ -332,7 +332,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public void handleException(TransportException exp) { logger.trace("{} error while performing synced flush on [{}], skipping", exp, shardId, shard); - results.put(shard, new SyncedFlushResponse(exp.getMessage())); + results.put(shard, new ShardSyncedFlushResponse(exp.getMessage())); contDownAndSendResponseIfDone(syncId, shards, shardId, totalShards, listener, countDown, results); } @@ -346,7 +346,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } private void contDownAndSendResponseIfDone(String syncId, List shards, ShardId shardId, int totalShards, - ActionListener listener, CountDown countDown, Map results) { + ActionListener listener, CountDown countDown, Map results) { if (countDown.countDown()) { assert results.size() == shards.size(); listener.onResponse(new ShardsSyncedFlushResult(shardId, syncId, totalShards, results)); @@ -369,7 +369,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } continue; } - transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler() { + transportService.sendRequest(node, PRE_SYNCED_FLUSH_ACTION_NAME, new PreShardSyncedFlushRequest(shard.shardId()), new BaseTransportResponseHandler() { @Override public PreSyncedFlushResponse newInstance() { return new PreSyncedFlushResponse(); @@ -401,7 +401,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private PreSyncedFlushResponse performPreSyncedFlush(PreSyncedFlushRequest request) { + private PreSyncedFlushResponse performPreSyncedFlush(PreShardSyncedFlushRequest request) { IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).getShard(request.shardId().id()); FlushRequest flushRequest = new FlushRequest().force(false).waitIfOngoing(true); logger.trace("{} performing pre sync flush", request.shardId()); @@ -410,7 +410,7 @@ public class 
SyncedFlushService extends AbstractComponent implements IndexEventL return new PreSyncedFlushResponse(commitId); } - private SyncedFlushResponse performSyncedFlush(SyncedFlushRequest request) { + private ShardSyncedFlushResponse performSyncedFlush(ShardSyncedFlushRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.getShard(request.shardId().id()); logger.trace("{} performing sync flush. sync id [{}], expected commit id {}", request.shardId(), request.syncId(), request.expectedCommitId()); @@ -418,11 +418,11 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL logger.trace("{} sync flush done. sync id [{}], result [{}]", request.shardId(), request.syncId(), result); switch (result) { case SUCCESS: - return new SyncedFlushResponse(); + return new ShardSyncedFlushResponse(); case COMMIT_MISMATCH: - return new SyncedFlushResponse("commit has changed"); + return new ShardSyncedFlushResponse("commit has changed"); case PENDING_OPERATIONS: - return new SyncedFlushResponse("pending operations"); + return new ShardSyncedFlushResponse("pending operations"); default: throw new ElasticsearchException("unknown synced flush result [" + result + "]"); } @@ -439,19 +439,19 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL return new InFlightOpsResponse(opCount); } - public final static class PreSyncedFlushRequest extends TransportRequest { + public final static class PreShardSyncedFlushRequest extends TransportRequest { private ShardId shardId; - public PreSyncedFlushRequest() { + public PreShardSyncedFlushRequest() { } - public PreSyncedFlushRequest(ShardId shardId) { + public PreShardSyncedFlushRequest(ShardId shardId) { this.shardId = shardId; } @Override public String toString() { - return "PreSyncedFlushRequest{" + + return "PreShardSyncedFlushRequest{" + "shardId=" + shardId + '}'; } @@ -504,16 +504,16 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - public static final class SyncedFlushRequest extends TransportRequest { + public static final class ShardSyncedFlushRequest extends TransportRequest { private String syncId; private Engine.CommitId expectedCommitId; private ShardId shardId; - public SyncedFlushRequest() { + public ShardSyncedFlushRequest() { } - public SyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { + public ShardSyncedFlushRequest(ShardId shardId, String syncId, Engine.CommitId expectedCommitId) { this.expectedCommitId = expectedCommitId; this.shardId = shardId; this.syncId = syncId; @@ -549,7 +549,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushRequest{" + + return "ShardSyncedFlushRequest{" + "shardId=" + shardId + ",syncId='" + syncId + '\'' + '}'; @@ -559,18 +559,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL /** * Response for third step of synced flush (writing the sync id) for one shard copy */ - public static final class SyncedFlushResponse extends TransportResponse { + public static final class ShardSyncedFlushResponse extends TransportResponse { /** * a non null value indicates a failure to sync flush. 
null means success */ String failureReason; - public SyncedFlushResponse() { + public ShardSyncedFlushResponse() { failureReason = null; } - public SyncedFlushResponse(String failureReason) { + public ShardSyncedFlushResponse(String failureReason) { this.failureReason = failureReason; } @@ -596,11 +596,17 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL @Override public String toString() { - return "SyncedFlushResponse{" + + return "ShardSyncedFlushResponse{" + "success=" + success() + ", failureReason='" + failureReason + '\'' + '}'; } + + public static ShardSyncedFlushResponse readSyncedFlushResponse(StreamInput in) throws IOException { + ShardSyncedFlushResponse shardSyncedFlushResponse = new ShardSyncedFlushResponse(); + shardSyncedFlushResponse.readFrom(in); + return shardSyncedFlushResponse; + } } @@ -677,18 +683,18 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL } } - private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { + private final class PreSyncedFlushTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(PreShardSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performPreSyncedFlush(request)); } } - private final class SyncedFlushTransportHandler implements TransportRequestHandler { + private final class SyncedFlushTransportHandler implements TransportRequestHandler { @Override - public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception { + public void messageReceived(ShardSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performSyncedFlush(request)); } } diff --git a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java index 0cec415d63b..08b7b34e91a 100644 --- a/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java +++ b/core/src/main/java/org/elasticsearch/indices/query/IndicesQueriesRegistry.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.query; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.EmptyQueryBuilder; @@ -40,11 +41,12 @@ public class IndicesQueriesRegistry extends AbstractComponent { public IndicesQueriesRegistry(Settings settings, Set injectedQueryParsers, NamedWriteableRegistry namedWriteableRegistry) { super(settings); Map> queryParsers = new HashMap<>(); - for (QueryParser queryParser : injectedQueryParsers) { + for (@SuppressWarnings("unchecked") QueryParser queryParser : injectedQueryParsers) { for (String name : queryParser.names()) { queryParsers.put(name, queryParser); } - namedWriteableRegistry.registerPrototype(QueryBuilder.class, queryParser.getBuilderPrototype()); + @SuppressWarnings("unchecked") NamedWriteable qb = queryParser.getBuilderPrototype(); + namedWriteableRegistry.registerPrototype(QueryBuilder.class, qb); } // EmptyQueryBuilder is not registered as query parser but used internally. 
// We need to register it with the NamedWriteableRegistry in order to serialize it @@ -58,4 +60,4 @@ public class IndicesQueriesRegistry extends AbstractComponent { public Map> queryParsers() { return queryParsers; } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 6db38d59e85..682b66e084e 100644 --- a/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/core/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -23,16 +23,16 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.Closeable; -import java.util.Objects; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -40,34 +40,33 @@ import java.util.concurrent.TimeUnit; */ public class RecoverySettings extends AbstractComponent implements Closeable { - public static final String INDICES_RECOVERY_CONCURRENT_STREAMS = "indices.recovery.concurrent_streams"; - public static final String INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS = "indices.recovery.concurrent_small_file_streams"; - public static final String INDICES_RECOVERY_MAX_BYTES_PER_SEC = "indices.recovery.max_bytes_per_sec"; + public static final Setting INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_streams", 3, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING = Setting.intSetting("indices.recovery.concurrent_small_file_streams", 2, true, Setting.Scope.CLUSTER); + public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), true, Setting.Scope.CLUSTER); /** * how long to wait before retrying after issues cause by cluster state syncing between nodes * i.e., local node is not yet known on remote node, remote shard not yet started etc. */ - public static final String INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC = "indices.recovery.retry_delay_state_sync"; + public static final Setting INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_state_sync", TimeValue.timeValueMillis(500), true, Setting.Scope.CLUSTER); /** how long to wait before retrying after network related issues */ - public static final String INDICES_RECOVERY_RETRY_DELAY_NETWORK = "indices.recovery.retry_delay_network"; - - /** - * recoveries that don't show any activity for more then this interval will be failed. 
- * defaults to `indices.recovery.internal_action_long_timeout` - */ - public static final String INDICES_RECOVERY_ACTIVITY_TIMEOUT = "indices.recovery.recovery_activity_timeout"; + public static final Setting INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING = Setting.positiveTimeSetting("indices.recovery.retry_delay_network", TimeValue.timeValueSeconds(5), true, Setting.Scope.CLUSTER); /** timeout value to use for requests made as part of the recovery process */ - public static final String INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT = "indices.recovery.internal_action_timeout"; + public static final Setting INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.recovery.internal_action_timeout", TimeValue.timeValueMinutes(15), true, Setting.Scope.CLUSTER); /** * timeout value to use for requests made as part of the recovery process that are expected to take long time. * defaults to twice `indices.recovery.internal_action_timeout`. */ - public static final String INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT = "indices.recovery.internal_action_long_timeout"; + public static final Setting INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.internal_action_long_timeout", (s) -> TimeValue.timeValueMillis(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(s).millis() * 2).toString(), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); + /** + * recoveries that don't show any activity for more than this interval will be failed. + * defaults to `indices.recovery.internal_action_long_timeout` + */ + public static final Setting INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING = Setting.timeSetting("indices.recovery.recovery_activity_timeout", (s) -> INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getRaw(s), TimeValue.timeValueSeconds(0), true, Setting.Scope.CLUSTER); public static final long SMALL_FILE_CUTOFF_BYTES = ByteSizeValue.parseBytesSizeValue("5mb", "SMALL_FILE_CUTOFF_BYTES").bytes(); @@ -89,31 +88,28 @@ public class RecoverySettings extends AbstractComponent implements Closeable { private volatile ByteSizeValue chunkSize = DEFAULT_CHUNK_SIZE; @Inject - public RecoverySettings(Settings settings, NodeSettingsService nodeSettingsService) { + public RecoverySettings(Settings settings, ClusterSettings clusterSettings) { super(settings); - this.retryDelayStateSync = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(500)); + this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings); // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes) // and we want to give the master time to remove a faulty node - this.retryDelayNetwork = settings.getAsTime(INDICES_RECOVERY_RETRY_DELAY_NETWORK, TimeValue.timeValueSeconds(5)); + this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings); - this.internalActionTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, TimeValue.timeValueMinutes(15)); - this.internalActionLongTimeout = settings.getAsTime(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, new TimeValue(internalActionTimeout.millis() * 2)); + this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings); + this.internalActionLongTimeout = INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings); - this.activityTimeout = settings.getAsTime(INDICES_RECOVERY_ACTIVITY_TIMEOUT, - // default to the internalActionLongTimeout
used as timeouts on RecoverySource - internalActionLongTimeout - ); + this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings); - this.concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, 3); + this.concurrentStreams = INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.get(settings); this.concurrentStreamPool = EsExecutors.newScaling("recovery_stream", 0, concurrentStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[recovery_stream]")); - this.concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 2); + this.concurrentSmallFileStreams = INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.get(settings); this.concurrentSmallFileStreamPool = EsExecutors.newScaling("small_file_recovery_stream", 0, concurrentSmallFileStreams, 60, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory(settings, "[small_file_recovery_stream]")); - this.maxBytesPerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(40, ByteSizeUnit.MB)); + this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings); if (maxBytesPerSec.bytes() <= 0) { rateLimiter = null; } else { @@ -123,7 +119,14 @@ public class RecoverySettings extends AbstractComponent implements Closeable { logger.debug("using max_bytes_per_sec[{}], concurrent_streams [{}]", maxBytesPerSec, concurrentStreams); - nodeSettingsService.addListener(new ApplySettings()); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING, this::setConcurrentStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING, this::setConcurrentSmallFileStreams); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout); + clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout); } @Override @@ -173,51 +176,44 @@ public class RecoverySettings extends AbstractComponent implements Closeable { this.chunkSize = chunkSize; } + private void setConcurrentStreams(int concurrentStreams) { + this.concurrentStreams = concurrentStreams; + concurrentStreamPool.setMaximumPoolSize(concurrentStreams); + } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - ByteSizeValue maxSizePerSec = settings.getAsBytesSize(INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec); - if (!Objects.equals(maxSizePerSec, RecoverySettings.this.maxBytesPerSec)) { - logger.info("updating [{}] from [{}] to [{}]", INDICES_RECOVERY_MAX_BYTES_PER_SEC, RecoverySettings.this.maxBytesPerSec, maxSizePerSec); - RecoverySettings.this.maxBytesPerSec = maxSizePerSec; - if (maxSizePerSec.bytes() <= 0) { - rateLimiter = null; - } else if (rateLimiter != null) { - rateLimiter.setMBPerSec(maxSizePerSec.mbFrac()); - } else { - rateLimiter = new SimpleRateLimiter(maxSizePerSec.mbFrac()); - } - } + public void 
setRetryDelayStateSync(TimeValue retryDelayStateSync) { + this.retryDelayStateSync = retryDelayStateSync; + } - int concurrentStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_STREAMS, RecoverySettings.this.concurrentStreams); - if (concurrentStreams != RecoverySettings.this.concurrentStreams) { - logger.info("updating [indices.recovery.concurrent_streams] from [{}] to [{}]", RecoverySettings.this.concurrentStreams, concurrentStreams); - RecoverySettings.this.concurrentStreams = concurrentStreams; - RecoverySettings.this.concurrentStreamPool.setMaximumPoolSize(concurrentStreams); - } + public void setRetryDelayNetwork(TimeValue retryDelayNetwork) { + this.retryDelayNetwork = retryDelayNetwork; + } - int concurrentSmallFileStreams = settings.getAsInt(INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RecoverySettings.this.concurrentSmallFileStreams); - if (concurrentSmallFileStreams != RecoverySettings.this.concurrentSmallFileStreams) { - logger.info("updating [indices.recovery.concurrent_small_file_streams] from [{}] to [{}]", RecoverySettings.this.concurrentSmallFileStreams, concurrentSmallFileStreams); - RecoverySettings.this.concurrentSmallFileStreams = concurrentSmallFileStreams; - RecoverySettings.this.concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); - } + public void setActivityTimeout(TimeValue activityTimeout) { + this.activityTimeout = activityTimeout; + } - RecoverySettings.this.retryDelayNetwork = maybeUpdate(RecoverySettings.this.retryDelayNetwork, settings, INDICES_RECOVERY_RETRY_DELAY_NETWORK); - RecoverySettings.this.retryDelayStateSync = maybeUpdate(RecoverySettings.this.retryDelayStateSync, settings, INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC); - RecoverySettings.this.activityTimeout = maybeUpdate(RecoverySettings.this.activityTimeout, settings, INDICES_RECOVERY_ACTIVITY_TIMEOUT); - RecoverySettings.this.internalActionTimeout = maybeUpdate(RecoverySettings.this.internalActionTimeout, settings, INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT); - RecoverySettings.this.internalActionLongTimeout = maybeUpdate(RecoverySettings.this.internalActionLongTimeout, settings, INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT); - } + public void setInternalActionTimeout(TimeValue internalActionTimeout) { + this.internalActionTimeout = internalActionTimeout; + } - private TimeValue maybeUpdate(final TimeValue currentValue, final Settings settings, final String key) { - final TimeValue value = settings.getAsTime(key, currentValue); - if (value.equals(currentValue)) { - return currentValue; - } - logger.info("updating [] from [{}] to [{}]", key, currentValue, value); - return value; + public void setInternalActionLongTimeout(TimeValue internalActionLongTimeout) { + this.internalActionLongTimeout = internalActionLongTimeout; + } + + private void setMaxBytesPerSec(ByteSizeValue maxBytesPerSec) { + this.maxBytesPerSec = maxBytesPerSec; + if (maxBytesPerSec.bytes() <= 0) { + rateLimiter = null; + } else if (rateLimiter != null) { + rateLimiter.setMBPerSec(maxBytesPerSec.mbFrac()); + } else { + rateLimiter = new SimpleRateLimiter(maxBytesPerSec.mbFrac()); } } + + private void setConcurrentSmallFileStreams(int concurrentSmallFileStreams) { + this.concurrentSmallFileStreams = concurrentSmallFileStreams; + concurrentSmallFileStreamPool.setMaximumPoolSize(concurrentSmallFileStreams); + } } diff --git a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java index f095cc355ef..0eed82561a3 
100644 --- a/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java +++ b/core/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java @@ -36,6 +36,8 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.uid.Versions; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -49,7 +51,6 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShardState; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.node.settings.NodeSettingsService; import java.io.IOException; import java.util.ArrayList; @@ -66,7 +67,7 @@ import java.util.concurrent.locks.ReentrantLock; */ public class IndicesTTLService extends AbstractLifecycleComponent { - public static final String INDICES_TTL_INTERVAL = "indices.ttl.interval"; + public static final Setting INDICES_TTL_INTERVAL_SETTING = Setting.positiveTimeSetting("indices.ttl.interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER); public static final String INDEX_TTL_DISABLE_PURGE = "index.ttl.disable_purge"; private final ClusterService clusterService; @@ -77,16 +78,15 @@ public class IndicesTTLService extends AbstractLifecycleComponent clazz = + (Class)Class.forName("com.sun.management.HotSpotDiagnosticMXBean"); + Class vmOptionClazz = Class.forName("com.sun.management.VMOption"); + PlatformManagedObject hotSpotDiagnosticMXBean = ManagementFactory.getPlatformMXBean(clazz); + Method vmOptionMethod = clazz.getMethod("getVMOption", String.class); + Object useCompressedOopsVmOption = vmOptionMethod.invoke(hotSpotDiagnosticMXBean, "UseCompressedOops"); + Method valueMethod = vmOptionClazz.getMethod("getValue"); + info.useCompressedOops = (String)valueMethod.invoke(useCompressedOopsVmOption); + } catch (Throwable t) { + // unable to deduce the state of compressed oops + info.useCompressedOops = "unknown"; + } + INSTANCE = info; } @@ -135,6 +156,8 @@ public class JvmInfo implements Streamable, ToXContent { String[] gcCollectors = Strings.EMPTY_ARRAY; String[] memoryPools = Strings.EMPTY_ARRAY; + private String useCompressedOops; + private JvmInfo() { } @@ -258,6 +281,18 @@ public class JvmInfo implements Streamable, ToXContent { return this.systemProperties; } + /** + * The value of the JVM flag UseCompressedOops, if available otherwise + * "unknown". The value "unknown" indicates that an attempt was + * made to obtain the value of the flag on this JVM and the attempt + * failed. 
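The detection logic this JvmInfo hunk adds is worth pausing on: com.sun.management.HotSpotDiagnosticMXBean is HotSpot-only, so it is looked up purely by reflection and any failure collapses to the string "unknown". A standalone sketch of the same probe, runnable outside Elasticsearch (the CompressedOopsProbe class and its main() harness are illustrative only; the reflective lookups mirror the ones in the hunk):

import java.lang.management.ManagementFactory;
import java.lang.management.PlatformManagedObject;
import java.lang.reflect.Method;

public class CompressedOopsProbe {
    public static void main(String[] args) {
        String useCompressedOops;
        try {
            // HotSpot-specific types, loaded reflectively so non-HotSpot JVMs still run this
            @SuppressWarnings("unchecked")
            Class<? extends PlatformManagedObject> clazz =
                    (Class<? extends PlatformManagedObject>) Class.forName("com.sun.management.HotSpotDiagnosticMXBean");
            Class<?> vmOptionClazz = Class.forName("com.sun.management.VMOption");
            PlatformManagedObject bean = ManagementFactory.getPlatformMXBean(clazz);
            Method getVMOption = clazz.getMethod("getVMOption", String.class);
            Object option = getVMOption.invoke(bean, "UseCompressedOops");
            useCompressedOops = (String) vmOptionClazz.getMethod("getValue").invoke(option);
        } catch (Throwable t) {
            // any reflection or linkage failure means the state cannot be deduced
            useCompressedOops = "unknown";
        }
        System.out.println("UseCompressedOops = " + useCompressedOops);
    }
}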
+ * + * @return the value of the JVM flag UseCompressedOops or "unknown" + */ + public String useCompressedOops() { + return this.useCompressedOops; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(Fields.JVM); @@ -279,6 +314,8 @@ public class JvmInfo implements Streamable, ToXContent { builder.field(Fields.GC_COLLECTORS, gcCollectors); builder.field(Fields.MEMORY_POOLS, memoryPools); + builder.field(Fields.USING_COMPRESSED_OOPS, useCompressedOops); + builder.endObject(); return builder; } @@ -306,6 +343,7 @@ public class JvmInfo implements Streamable, ToXContent { static final XContentBuilderString DIRECT_MAX_IN_BYTES = new XContentBuilderString("direct_max_in_bytes"); static final XContentBuilderString GC_COLLECTORS = new XContentBuilderString("gc_collectors"); static final XContentBuilderString MEMORY_POOLS = new XContentBuilderString("memory_pools"); + static final XContentBuilderString USING_COMPRESSED_OOPS = new XContentBuilderString("using_compressed_ordinary_object_pointers"); } public static JvmInfo readJvmInfo(StreamInput in) throws IOException { @@ -337,6 +375,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.readFrom(in); gcCollectors = in.readStringArray(); memoryPools = in.readStringArray(); + useCompressedOops = in.readString(); } @Override @@ -361,6 +400,7 @@ public class JvmInfo implements Streamable, ToXContent { mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); + out.writeString(useCompressedOops); } public static class Mem implements Streamable { diff --git a/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java new file mode 100644 index 00000000000..599755e78a4 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/monitor/os/DummyOsInfo.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.monitor.os; + +public class DummyOsInfo extends OsInfo { + + DummyOsInfo() { + refreshInterval = 0; + availableProcessors = 0; + allocatedProcessors = 0; + name = "dummy_name"; + arch = "dummy_arch"; + version = "dummy_version"; + } + + public static final DummyOsInfo INSTANCE = new DummyOsInfo(); +} diff --git a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index f34cd51a143..d94447221c3 100644 --- a/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/core/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -108,6 +108,9 @@ public class OsInfo implements Streamable, ToXContent { refreshInterval = in.readLong(); availableProcessors = in.readInt(); allocatedProcessors = in.readInt(); + name = in.readOptionalString(); + arch = in.readOptionalString(); + version = in.readOptionalString(); } @Override @@ -115,5 +118,8 @@ public class OsInfo implements Streamable, ToXContent { out.writeLong(refreshInterval); out.writeInt(availableProcessors); out.writeInt(allocatedProcessors); + out.writeOptionalString(name); + out.writeOptionalString(arch); + out.writeOptionalString(version); } } diff --git a/core/src/main/java/org/elasticsearch/node/Node.java b/core/src/main/java/org/elasticsearch/node/Node.java index f4bc34a91e2..c964e79587e 100644 --- a/core/src/main/java/org/elasticsearch/node/Node.java +++ b/core/src/main/java/org/elasticsearch/node/Node.java @@ -23,7 +23,6 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; -import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClientModule; @@ -42,11 +41,15 @@ import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryService; @@ -58,7 +61,7 @@ import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.http.HttpServer; -import org.elasticsearch.http.HttpServerModule; +import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.AnalysisModule; @@ -73,7 +76,6 @@ import org.elasticsearch.indices.ttl.IndicesTTLService; import org.elasticsearch.monitor.MonitorService; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import org.elasticsearch.node.settings.NodeSettingsService; import 
org.elasticsearch.percolator.PercolatorModule; import org.elasticsearch.percolator.PercolatorService; import org.elasticsearch.plugins.Plugin; @@ -81,7 +83,6 @@ import org.elasticsearch.plugins.PluginsModule; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.RepositoriesModule; import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestModule; import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; @@ -90,14 +91,21 @@ import org.elasticsearch.snapshots.SnapshotShardsService; import org.elasticsearch.snapshots.SnapshotsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPoolModule; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportService; import org.elasticsearch.tribe.TribeModule; import org.elasticsearch.tribe.TribeService; import org.elasticsearch.watcher.ResourceWatcherModule; import org.elasticsearch.watcher.ResourceWatcherService; +import java.io.BufferedWriter; import java.io.IOException; +import java.net.Inet6Address; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.charset.Charset; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -156,7 +164,6 @@ public class Node implements Releasable { throw new IllegalStateException("Failed to created node environment", ex); } final NetworkService networkService = new NetworkService(settings); - final NodeSettingsService nodeSettingsService = new NodeSettingsService(settings); final SettingsFilter settingsFilter = new SettingsFilter(settings); final ThreadPool threadPool = new ThreadPool(settings); boolean success = false; @@ -171,20 +178,15 @@ public class Node implements Releasable { } modules.add(new PluginsModule(pluginsService)); modules.add(new SettingsModule(this.settings, settingsFilter)); - modules.add(new NodeModule(this, nodeSettingsService, monitorService)); - modules.add(new NetworkModule(networkService)); - modules.add(new ScriptModule(this.settings)); modules.add(new EnvironmentModule(environment)); + modules.add(new NodeModule(this, monitorService)); + modules.add(new NetworkModule(networkService, settings, false)); + modules.add(new ScriptModule(this.settings)); modules.add(new NodeEnvironmentModule(nodeEnvironment)); modules.add(new ClusterNameModule(this.settings)); modules.add(new ThreadPoolModule(threadPool)); modules.add(new DiscoveryModule(this.settings)); modules.add(new ClusterModule(this.settings)); - modules.add(new RestModule(this.settings)); - modules.add(new TransportModule(settings)); - if (settings.getAsBoolean(HTTP_ENABLED, true)) { - modules.add(new HttpServerModule(settings)); - } modules.add(new IndicesModule()); modules.add(new SearchModule()); modules.add(new ActionModule(false)); @@ -201,7 +203,7 @@ public class Node implements Releasable { injector = modules.createInjector(); client = injector.getInstance(Client.class); - threadPool.setNodeSettingsService(injector.getInstance(NodeSettingsService.class)); + threadPool.setClusterSettings(injector.getInstance(ClusterSettings.class)); success = true; } catch (IOException ex) { throw new ElasticsearchException("failed to bind service", ex); @@ -274,6 +276,15 @@ public class Node implements Releasable { injector.getInstance(ResourceWatcherService.class).start(); 
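Across the RecoverySettings, IndicesTTLService, and Node hunks the same refactoring pattern repeats: a String settings key plus a hand-rolled NodeSettingsService.Listener becomes a typed Setting constant whose initial value is read from Settings and whose runtime updates arrive through a consumer registered on ClusterSettings. A hedged sketch of the pattern in isolation (MyService and the my.service.concurrency key are invented for illustration; the Setting.intSetting and addSettingsUpdateConsumer calls are the same ones the RecoverySettings hunk uses):

import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

public class MyService extends AbstractComponent {

    // dynamic (true) cluster-scoped setting with a default of 4
    public static final Setting<Integer> MY_CONCURRENCY_SETTING =
            Setting.intSetting("my.service.concurrency", 4, true, Setting.Scope.CLUSTER);

    private volatile int concurrency;

    public MyService(Settings settings, ClusterSettings clusterSettings) {
        super(settings);
        // read the initial value once from node settings
        this.concurrency = MY_CONCURRENCY_SETTING.get(settings);
        // register for dynamic updates to this one setting
        clusterSettings.addSettingsUpdateConsumer(MY_CONCURRENCY_SETTING, this::setConcurrency);
    }

    private void setConcurrency(int concurrency) {
        this.concurrency = concurrency;
    }
}

The per-setting consumer replaces the old onRefreshSettings callback, which had to re-read every key, diff it against the current value, and log changes by hand; that bookkeeping now appears to live in one place, ClusterSettings.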
injector.getInstance(TribeService.class).start(); + if (System.getProperty("es.tests.portsfile", "false").equals("true")) { + if (settings.getAsBoolean("http.enabled", true)) { + HttpServerTransport http = injector.getInstance(HttpServerTransport.class); + writePortsFile("http", http.boundAddress()); + } + TransportService transport = injector.getInstance(TransportService.class); + writePortsFile("transport", transport.boundAddress()); + } + logger.info("started"); return this; @@ -425,4 +436,27 @@ public class Node implements Releasable { public Injector injector() { return this.injector; } + + /** Writes a file to the logs dir containing the ports for the given transport type */ + private void writePortsFile(String type, BoundTransportAddress boundAddress) { + Path tmpPortsFile = environment.logsFile().resolve(type + ".ports.tmp"); + try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { + for (TransportAddress address : boundAddress.boundAddresses()) { + InetAddress inetAddress = InetAddress.getByName(address.getAddress()); + if (inetAddress instanceof Inet6Address && inetAddress.isLinkLocalAddress()) { + // no link local, just causes problems + continue; + } + writer.write(NetworkAddress.formatAddress(new InetSocketAddress(inetAddress, address.getPort())) + "\n"); + } + } catch (IOException e) { + throw new RuntimeException("Failed to write ports file", e); + } + Path portsFile = environment.logsFile().resolve(type + ".ports"); + try { + Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException e) { + throw new RuntimeException("Failed to rename ports file", e); + } + } } diff --git a/core/src/main/java/org/elasticsearch/node/NodeModule.java b/core/src/main/java/org/elasticsearch/node/NodeModule.java index 3641c325030..aa52d389340 100644 --- a/core/src/main/java/org/elasticsearch/node/NodeModule.java +++ b/core/src/main/java/org/elasticsearch/node/NodeModule.java @@ -23,9 +23,7 @@ import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.monitor.MonitorService; -import org.elasticsearch.node.Node; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; /** * @@ -33,16 +31,14 @@ import org.elasticsearch.node.settings.NodeSettingsService; public class NodeModule extends AbstractModule { private final Node node; - private final NodeSettingsService nodeSettingsService; private final MonitorService monitorService; // pkg private so tests can mock Class pageCacheRecyclerImpl = PageCacheRecycler.class; Class bigArraysImpl = BigArrays.class; - public NodeModule(Node node, NodeSettingsService nodeSettingsService, MonitorService monitorService) { + public NodeModule(Node node, MonitorService monitorService) { this.node = node; - this.nodeSettingsService = nodeSettingsService; this.monitorService = monitorService; } @@ -60,7 +56,6 @@ public class NodeModule extends AbstractModule { } bind(Node.class).toInstance(node); - bind(NodeSettingsService.class).toInstance(nodeSettingsService); bind(MonitorService.class).toInstance(monitorService); bind(NodeService.class).asEagerSingleton(); } diff --git a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java b/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java deleted file mode 100644 index dbe6a33172b..00000000000 --- 
a/core/src/main/java/org/elasticsearch/node/settings/NodeSettingsService.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.node.settings; - -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.logging.ESLoggerFactory; -import org.elasticsearch.common.settings.Settings; - -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; - -/** - * A service that allows to register for node settings change that can come from cluster - * events holding new settings. - */ -public class NodeSettingsService extends AbstractComponent implements ClusterStateListener { - - private static volatile Settings globalSettings = Settings.Builder.EMPTY_SETTINGS; - - /** - * Returns the global (static) settings last updated by a node. Note, if you have multiple - * nodes on the same JVM, it will just return the latest one set... 
- */ - public static Settings getGlobalSettings() { - return globalSettings; - } - - private volatile Settings lastSettingsApplied; - - private final CopyOnWriteArrayList listeners = new CopyOnWriteArrayList<>(); - - @Inject - public NodeSettingsService(Settings settings) { - super(settings); - globalSettings = settings; - } - - // inject it as a member, so we won't get into possible cyclic problems - public void setClusterService(ClusterService clusterService) { - clusterService.add(this); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - // nothing to do until we actually recover from the gateway or any other block indicates we need to disable persistency - if (event.state().blocks().disableStatePersistence()) { - return; - } - - if (!event.metaDataChanged()) { - // nothing changed in the metadata, no need to check - return; - } - - if (lastSettingsApplied != null && event.state().metaData().settings().equals(lastSettingsApplied)) { - // nothing changed in the settings, ignore - return; - } - - for (Listener listener : listeners) { - try { - listener.onRefreshSettings(event.state().metaData().settings()); - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, listener); - } - } - - try { - for (Map.Entry entry : event.state().metaData().settings().getAsMap().entrySet()) { - if (entry.getKey().startsWith("logger.")) { - String component = entry.getKey().substring("logger.".length()); - if ("_root".equals(component)) { - ESLoggerFactory.getRootLogger().setLevel(entry.getValue()); - } else { - ESLoggerFactory.getLogger(component).setLevel(entry.getValue()); - } - } - } - } catch (Exception e) { - logger.warn("failed to refresh settings for [{}]", e, "logger"); - } - - lastSettingsApplied = event.state().metaData().settings(); - globalSettings = lastSettingsApplied; - } - - /** - * Only settings registered in {@link org.elasticsearch.cluster.ClusterModule} can be changed dynamically. 
- */ - public void addListener(Listener listener) { - this.listeners.add(listener); - } - - public void removeListener(Listener listener) { - this.listeners.remove(listener); - } - - public interface Listener { - void onRefreshSettings(Settings settings); - } -} diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java index 70abaaaff3d..8df956f2cea 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolateContext.java @@ -38,12 +38,11 @@ import org.elasticsearch.common.HasHeaders; import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; @@ -74,6 +73,8 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -139,7 +140,7 @@ public class PercolateContext extends SearchContext { this.bigArrays = bigArrays.withCircuitBreaking(); this.querySearchResult = new QuerySearchResult(0, searchShardTarget); this.engineSearcher = indexShard.acquireSearcher("percolate"); - this.searcher = new ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.scriptService = scriptService; this.numberOfShards = request.getNumberOfShards(); this.aliasFilter = aliasFilter; @@ -164,7 +165,7 @@ public class PercolateContext extends SearchContext { fields.put(field.name(), new InternalSearchHitField(field.name(), Collections.emptyList())); } hitContext().reset( - new InternalSearchHit(0, "unknown", new StringText(parsedDocument.type()), fields), + new InternalSearchHit(0, "unknown", new Text(parsedDocument.type()), fields), atomicReaderContext, 0, docSearcher.searcher() ); } @@ -748,5 +749,7 @@ public class PercolateContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + throw new UnsupportedOperationException(); + } } diff --git a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java index fa7b47766a8..eb33f3832b4 100644 --- a/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java +++ b/core/src/main/java/org/elasticsearch/percolator/PercolatorService.java @@ -52,8 +52,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import 
org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -533,10 +531,10 @@ public class PercolatorService extends AbstractComponent { List finalMatches = new ArrayList<>(requestedSize == 0 ? numMatches : requestedSize); outer: for (PercolateShardResponse response : shardResults) { - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? NO_SCORE : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); Map hl = response.hls().isEmpty() ? null : response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); if (requestedSize != 0 && finalMatches.size() == requestedSize) { @@ -686,10 +684,10 @@ public class PercolatorService extends AbstractComponent { List finalMatches = new ArrayList<>(requestedSize); if (nonEmptyResponses == 1) { PercolateShardResponse response = shardResults.get(firstNonEmptyIndex); - Text index = new StringText(response.getIndex()); + Text index = new Text(response.getIndex()); for (int i = 0; i < response.matches().length; i++) { float score = response.scores().length == 0 ? Float.NaN : response.scores()[i]; - Text match = new BytesText(new BytesArray(response.matches()[i])); + Text match = new Text(new BytesArray(response.matches()[i])); if (!response.hls().isEmpty()) { Map hl = response.hls().get(i); finalMatches.add(new PercolateResponse.Match(index, match, score, hl)); @@ -728,8 +726,8 @@ public class PercolatorService extends AbstractComponent { slots[requestIndex]++; PercolateShardResponse shardResponse = shardResults.get(requestIndex); - Text index = new StringText(shardResponse.getIndex()); - Text match = new BytesText(new BytesArray(shardResponse.matches()[itemIndex])); + Text index = new Text(shardResponse.getIndex()); + Text match = new Text(new BytesArray(shardResponse.matches()[itemIndex])); float score = shardResponse.scores()[itemIndex]; if (!shardResponse.hls().isEmpty()) { Map hl = shardResponse.hls().get(itemIndex); diff --git a/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java new file mode 100644 index 00000000000..a57a96c631d --- /dev/null +++ b/core/src/main/java/org/elasticsearch/plugins/DummyPluginInfo.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.plugins; + +public class DummyPluginInfo extends PluginInfo { + + private DummyPluginInfo(String name, String description, boolean site, String version, boolean jvm, String classname, boolean isolated) { + super(name, description, site, version, jvm, classname, isolated); + } + + public static final DummyPluginInfo INSTANCE = new DummyPluginInfo("dummy_plugin_name", "dummy plugin description", true, "dummy_plugin_version", true, "DummyPluginName", true); +} diff --git a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java index 6600bf7035d..1ebe7813d3c 100644 --- a/core/src/main/java/org/elasticsearch/plugins/PluginManager.java +++ b/core/src/main/java/org/elasticsearch/plugins/PluginManager.java @@ -22,6 +22,7 @@ package org.elasticsearch.plugins; import org.apache.lucene.util.IOUtils; import org.elasticsearch.*; import org.elasticsearch.bootstrap.JarHell; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.cli.Terminal; import org.elasticsearch.common.collect.Tuple; @@ -66,7 +67,7 @@ public class PluginManager { "plugin", "plugin.bat", "service.bat")); - + static final Set MODULES = unmodifiableSet(newHashSet( "lang-expression", "lang-groovy")); @@ -89,6 +90,7 @@ public class PluginManager { "mapper-murmur3", "mapper-size", "repository-azure", + "repository-hdfs", "repository-s3", "store-smb")); @@ -124,7 +126,7 @@ public class PluginManager { checkForForbiddenName(pluginHandle.name); } else { // if we have no name but url, use temporary name that will be overwritten later - pluginHandle = new PluginHandle("temp_name" + new Random().nextInt(), null, null); + pluginHandle = new PluginHandle("temp_name" + Randomness.get().nextInt(), null, null); } Path pluginFile = download(pluginHandle, terminal); @@ -224,7 +226,7 @@ public class PluginManager { PluginInfo info = PluginInfo.readFromProperties(root); terminal.println(VERBOSE, "%s", info); - // don't let luser install plugin as a module... + // don't let luser install plugin as a module... // they might be unavoidably in maven central and are packaged up the same way) if (MODULES.contains(info.getName())) { throw new IOException("plugin '" + info.getName() + "' cannot be installed like this, it is a system module"); diff --git a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java deleted file mode 100644 index f0e4d10d7c4..00000000000 --- a/core/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.multibindings.Multibinder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; -import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; -import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; -import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.put.RestPutRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerifyRepositoryAction; -import org.elasticsearch.rest.action.admin.cluster.reroute.RestClusterRerouteAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterGetSettingsAction; -import org.elasticsearch.rest.action.admin.cluster.settings.RestClusterUpdateSettingsAction; -import org.elasticsearch.rest.action.admin.cluster.shards.RestClusterSearchShardsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.create.RestCreateSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction; -import org.elasticsearch.rest.action.admin.cluster.snapshots.status.RestSnapshotsStatusAction; -import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction; -import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction; -import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction; -import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.delete.RestIndexDeleteAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.get.RestGetIndicesAliasesAction; -import org.elasticsearch.rest.action.admin.indices.alias.head.RestAliasesExistAction; -import org.elasticsearch.rest.action.admin.indices.alias.put.RestIndexPutAliasAction; -import org.elasticsearch.rest.action.admin.indices.analyze.RestAnalyzeAction; -import org.elasticsearch.rest.action.admin.indices.cache.clear.RestClearIndicesCacheAction; -import org.elasticsearch.rest.action.admin.indices.close.RestCloseIndexAction; -import org.elasticsearch.rest.action.admin.indices.create.RestCreateIndexAction; -import org.elasticsearch.rest.action.admin.indices.delete.RestDeleteIndexAction; -import org.elasticsearch.rest.action.admin.indices.exists.indices.RestIndicesExistsAction; -import org.elasticsearch.rest.action.admin.indices.exists.types.RestTypesExistsAction; -import org.elasticsearch.rest.action.admin.indices.flush.RestFlushAction; -import org.elasticsearch.rest.action.admin.indices.flush.RestSyncedFlushAction; -import org.elasticsearch.rest.action.admin.indices.forcemerge.RestForceMergeAction; -import 
org.elasticsearch.rest.action.admin.indices.get.RestGetIndicesAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetFieldMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.get.RestGetMappingAction; -import org.elasticsearch.rest.action.admin.indices.mapping.put.RestPutMappingAction; -import org.elasticsearch.rest.action.admin.indices.open.RestOpenIndexAction; -import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; -import org.elasticsearch.rest.action.admin.indices.refresh.RestRefreshAction; -import org.elasticsearch.rest.action.admin.indices.segments.RestIndicesSegmentsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestGetSettingsAction; -import org.elasticsearch.rest.action.admin.indices.settings.RestUpdateSettingsAction; -import org.elasticsearch.rest.action.admin.indices.shards.RestIndicesShardStoresAction; -import org.elasticsearch.rest.action.admin.indices.stats.RestIndicesStatsAction; -import org.elasticsearch.rest.action.admin.indices.template.delete.RestDeleteIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.get.RestGetIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.head.RestHeadIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.template.put.RestPutIndexTemplateAction; -import org.elasticsearch.rest.action.admin.indices.upgrade.RestUpgradeAction; -import org.elasticsearch.rest.action.admin.indices.validate.query.RestValidateQueryAction; -import org.elasticsearch.rest.action.admin.indices.validate.template.RestRenderSearchTemplateAction; -import org.elasticsearch.rest.action.admin.indices.warmer.delete.RestDeleteWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.get.RestGetWarmerAction; -import org.elasticsearch.rest.action.admin.indices.warmer.put.RestPutWarmerAction; -import org.elasticsearch.rest.action.bulk.RestBulkAction; -import org.elasticsearch.rest.action.cat.AbstractCatAction; -import org.elasticsearch.rest.action.cat.RestAliasAction; -import org.elasticsearch.rest.action.cat.RestAllocationAction; -import org.elasticsearch.rest.action.cat.RestCatAction; -import org.elasticsearch.rest.action.cat.RestFielddataAction; -import org.elasticsearch.rest.action.cat.RestHealthAction; -import org.elasticsearch.rest.action.cat.RestIndicesAction; -import org.elasticsearch.rest.action.cat.RestMasterAction; -import org.elasticsearch.rest.action.cat.RestNodeAttrsAction; -import org.elasticsearch.rest.action.cat.RestNodesAction; -import org.elasticsearch.rest.action.cat.RestPluginsAction; -import org.elasticsearch.rest.action.cat.RestRepositoriesAction; -import org.elasticsearch.rest.action.cat.RestSegmentsAction; -import org.elasticsearch.rest.action.cat.RestShardsAction; -import org.elasticsearch.rest.action.cat.RestSnapshotAction; -import org.elasticsearch.rest.action.cat.RestThreadPoolAction; -import org.elasticsearch.rest.action.delete.RestDeleteAction; -import org.elasticsearch.rest.action.explain.RestExplainAction; -import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; -import org.elasticsearch.rest.action.get.RestGetAction; -import org.elasticsearch.rest.action.get.RestGetSourceAction; -import org.elasticsearch.rest.action.get.RestHeadAction; -import org.elasticsearch.rest.action.get.RestMultiGetAction; -import org.elasticsearch.rest.action.index.RestIndexAction; -import org.elasticsearch.rest.action.main.RestMainAction; -import 
org.elasticsearch.rest.action.percolate.RestMultiPercolateAction; -import org.elasticsearch.rest.action.percolate.RestPercolateAction; -import org.elasticsearch.rest.action.script.RestDeleteIndexedScriptAction; -import org.elasticsearch.rest.action.script.RestGetIndexedScriptAction; -import org.elasticsearch.rest.action.script.RestPutIndexedScriptAction; -import org.elasticsearch.rest.action.search.RestClearScrollAction; -import org.elasticsearch.rest.action.search.RestMultiSearchAction; -import org.elasticsearch.rest.action.search.RestSearchAction; -import org.elasticsearch.rest.action.search.RestSearchScrollAction; -import org.elasticsearch.rest.action.suggest.RestSuggestAction; -import org.elasticsearch.rest.action.template.RestDeleteSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestGetSearchTemplateAction; -import org.elasticsearch.rest.action.template.RestPutSearchTemplateAction; -import org.elasticsearch.rest.action.termvectors.RestMultiTermVectorsAction; -import org.elasticsearch.rest.action.termvectors.RestTermVectorsAction; -import org.elasticsearch.rest.action.update.RestUpdateAction; - -import java.util.ArrayList; -import java.util.List; - -/** - * - */ -public class RestActionModule extends AbstractModule { - private List> restPluginsActions = new ArrayList<>(); - - public RestActionModule(List> restPluginsActions) { - this.restPluginsActions = restPluginsActions; - } - - @Override - protected void configure() { - for (Class restAction : restPluginsActions) { - bind(restAction).asEagerSingleton(); - } - - bind(RestMainAction.class).asEagerSingleton(); - - bind(RestNodesInfoAction.class).asEagerSingleton(); - bind(RestNodesStatsAction.class).asEagerSingleton(); - bind(RestNodesHotThreadsAction.class).asEagerSingleton(); - bind(RestClusterStatsAction.class).asEagerSingleton(); - bind(RestClusterStateAction.class).asEagerSingleton(); - bind(RestClusterHealthAction.class).asEagerSingleton(); - bind(RestClusterUpdateSettingsAction.class).asEagerSingleton(); - bind(RestClusterGetSettingsAction.class).asEagerSingleton(); - bind(RestClusterRerouteAction.class).asEagerSingleton(); - bind(RestClusterSearchShardsAction.class).asEagerSingleton(); - bind(RestPendingClusterTasksAction.class).asEagerSingleton(); - bind(RestPutRepositoryAction.class).asEagerSingleton(); - bind(RestGetRepositoriesAction.class).asEagerSingleton(); - bind(RestDeleteRepositoryAction.class).asEagerSingleton(); - bind(RestVerifyRepositoryAction.class).asEagerSingleton(); - bind(RestGetSnapshotsAction.class).asEagerSingleton(); - bind(RestCreateSnapshotAction.class).asEagerSingleton(); - bind(RestRestoreSnapshotAction.class).asEagerSingleton(); - bind(RestDeleteSnapshotAction.class).asEagerSingleton(); - bind(RestSnapshotsStatusAction.class).asEagerSingleton(); - - bind(RestIndicesExistsAction.class).asEagerSingleton(); - bind(RestTypesExistsAction.class).asEagerSingleton(); - bind(RestGetIndicesAction.class).asEagerSingleton(); - bind(RestIndicesStatsAction.class).asEagerSingleton(); - bind(RestIndicesSegmentsAction.class).asEagerSingleton(); - bind(RestIndicesShardStoresAction.class).asEagerSingleton(); - bind(RestGetAliasesAction.class).asEagerSingleton(); - bind(RestAliasesExistAction.class).asEagerSingleton(); - bind(RestIndexDeleteAliasesAction.class).asEagerSingleton(); - bind(RestIndexPutAliasAction.class).asEagerSingleton(); - bind(RestIndicesAliasesAction.class).asEagerSingleton(); - bind(RestGetIndicesAliasesAction.class).asEagerSingleton(); - 
bind(RestCreateIndexAction.class).asEagerSingleton(); - bind(RestDeleteIndexAction.class).asEagerSingleton(); - bind(RestCloseIndexAction.class).asEagerSingleton(); - bind(RestOpenIndexAction.class).asEagerSingleton(); - - bind(RestUpdateSettingsAction.class).asEagerSingleton(); - bind(RestGetSettingsAction.class).asEagerSingleton(); - - bind(RestAnalyzeAction.class).asEagerSingleton(); - bind(RestGetIndexTemplateAction.class).asEagerSingleton(); - bind(RestPutIndexTemplateAction.class).asEagerSingleton(); - bind(RestDeleteIndexTemplateAction.class).asEagerSingleton(); - bind(RestHeadIndexTemplateAction.class).asEagerSingleton(); - - bind(RestPutWarmerAction.class).asEagerSingleton(); - bind(RestDeleteWarmerAction.class).asEagerSingleton(); - bind(RestGetWarmerAction.class).asEagerSingleton(); - - bind(RestPutMappingAction.class).asEagerSingleton(); - bind(RestGetMappingAction.class).asEagerSingleton(); - bind(RestGetFieldMappingAction.class).asEagerSingleton(); - - bind(RestRefreshAction.class).asEagerSingleton(); - bind(RestFlushAction.class).asEagerSingleton(); - bind(RestSyncedFlushAction.class).asEagerSingleton(); - bind(RestForceMergeAction.class).asEagerSingleton(); - bind(RestUpgradeAction.class).asEagerSingleton(); - bind(RestClearIndicesCacheAction.class).asEagerSingleton(); - - bind(RestIndexAction.class).asEagerSingleton(); - bind(RestGetAction.class).asEagerSingleton(); - bind(RestGetSourceAction.class).asEagerSingleton(); - bind(RestHeadAction.class).asEagerSingleton(); - bind(RestMultiGetAction.class).asEagerSingleton(); - bind(RestDeleteAction.class).asEagerSingleton(); - bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton(); - bind(RestSuggestAction.class).asEagerSingleton(); - bind(RestTermVectorsAction.class).asEagerSingleton(); - bind(RestMultiTermVectorsAction.class).asEagerSingleton(); - bind(RestBulkAction.class).asEagerSingleton(); - bind(RestUpdateAction.class).asEagerSingleton(); - bind(RestPercolateAction.class).asEagerSingleton(); - bind(RestMultiPercolateAction.class).asEagerSingleton(); - - bind(RestSearchAction.class).asEagerSingleton(); - bind(RestSearchScrollAction.class).asEagerSingleton(); - bind(RestClearScrollAction.class).asEagerSingleton(); - bind(RestMultiSearchAction.class).asEagerSingleton(); - bind(RestRenderSearchTemplateAction.class).asEagerSingleton(); - - bind(RestValidateQueryAction.class).asEagerSingleton(); - - bind(RestExplainAction.class).asEagerSingleton(); - - bind(RestRecoveryAction.class).asEagerSingleton(); - - // Templates API - bind(RestGetSearchTemplateAction.class).asEagerSingleton(); - bind(RestPutSearchTemplateAction.class).asEagerSingleton(); - bind(RestDeleteSearchTemplateAction.class).asEagerSingleton(); - - // Scripts API - bind(RestGetIndexedScriptAction.class).asEagerSingleton(); - bind(RestPutIndexedScriptAction.class).asEagerSingleton(); - bind(RestDeleteIndexedScriptAction.class).asEagerSingleton(); - - - bind(RestFieldStatsAction.class).asEagerSingleton(); - - // cat API - Multibinder catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class); - catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestShardsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestMasterAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestIndicesAction.class).asEagerSingleton(); - 
catActionMultibinder.addBinding().to(RestSegmentsAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.count.RestCountAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestCountAction.class).asEagerSingleton(); - // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestRecoveryAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestHealthAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestAliasAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestThreadPoolAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestPluginsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestFielddataAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestNodeAttrsAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestRepositoriesAction.class).asEagerSingleton(); - catActionMultibinder.addBinding().to(RestSnapshotAction.class).asEagerSingleton(); - // no abstract cat action - bind(RestCatAction.class).asEagerSingleton(); - } -} diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java index a1cfdb48ddb..b7b5064c096 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/cluster/settings/RestClusterGetSettingsAction.java @@ -23,19 +23,27 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; +import java.io.IOException; + /** */ public class RestClusterGetSettingsAction extends BaseRestHandler { + private final ClusterSettings clusterSettings; + @Inject - public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client) { + public RestClusterGetSettingsAction(Settings settings, RestController controller, Client client, ClusterSettings clusterSettings) { super(settings, controller, client); + this.clusterSettings = clusterSettings; controller.registerHandler(RestRequest.Method.GET, "/_cluster/settings", this); } @@ -44,24 +52,34 @@ public class RestClusterGetSettingsAction extends BaseRestHandler { ClusterStateRequest clusterStateRequest = Requests.clusterStateRequest() .routingTable(false) .nodes(false); + final boolean renderDefaults = request.paramAsBoolean("defaults", false); clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local())); client.admin().cluster().state(clusterStateRequest, new RestBuilderListener(channel) { 
@Override public RestResponse buildResponse(ClusterStateResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - - builder.startObject("persistent"); - response.getState().metaData().persistentSettings().toXContent(builder, request); - builder.endObject(); - - builder.startObject("transient"); - response.getState().metaData().transientSettings().toXContent(builder, request); - builder.endObject(); - - builder.endObject(); - - return new BytesRestResponse(RestStatus.OK, builder); + return new BytesRestResponse(RestStatus.OK, renderResponse(response.getState(), renderDefaults, builder, request)); } }); } + + private XContentBuilder renderResponse(ClusterState state, boolean renderDefaults, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + + builder.startObject("persistent"); + state.metaData().persistentSettings().toXContent(builder, params); + builder.endObject(); + + builder.startObject("transient"); + state.metaData().transientSettings().toXContent(builder, params); + builder.endObject(); + + if (renderDefaults) { + builder.startObject("defaults"); + clusterSettings.diff(state.metaData().settings(), this.settings).toXContent(builder, params); + builder.endObject(); + } + + builder.endObject(); + return builder; + } } diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java index 9a3f844abb1..0b8ffcf94da 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/flush/RestSyncedFlushAction.java @@ -19,14 +19,14 @@ package org.elasticsearch.rest.action.admin.indices.flush; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult; -import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.support.RestBuilderListener; @@ -38,12 +38,9 @@ import static org.elasticsearch.rest.RestRequest.Method.POST; */ public class RestSyncedFlushAction extends BaseRestHandler { - private final SyncedFlushService syncedFlushService; - @Inject - public RestSyncedFlushAction(Settings settings, RestController controller, Client client, SyncedFlushService syncedFlushService) { + public RestSyncedFlushAction(Settings settings, RestController controller, Client client) { super(settings, controller, client); - this.syncedFlushService = syncedFlushService; controller.registerHandler(POST, "/_flush/synced", this); controller.registerHandler(POST, "/{index}/_flush/synced", this); @@ -53,12 +50,12 @@ public class RestSyncedFlushAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - String[] indices = Strings.splitStringByCommaToArray(request.param("index")); IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.lenientExpandOpen()); - - 
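The rewritten RestClusterGetSettingsAction above always emits the persistent and transient sections and adds a defaults section only when the request passes ?defaults=true, filled from clusterSettings.diff(). A rough standalone sketch of the response shape it builds, with plain maps standing in for XContentBuilder and hypothetical setting names:

import java.util.LinkedHashMap;
import java.util.Map;

public class ClusterSettingsResponseSketch {
    // Mirrors renderResponse: persistent and transient sections always appear,
    // "defaults" only when the caller asked for it. In the real handler the
    // defaults map comes from clusterSettings.diff(explicit settings, node settings).
    static Map<String, Object> render(Map<String, Object> persistent,
                                      Map<String, Object> transientSettings,
                                      Map<String, Object> defaults,
                                      boolean renderDefaults) {
        Map<String, Object> body = new LinkedHashMap<>();
        body.put("persistent", persistent);
        body.put("transient", transientSettings);
        if (renderDefaults) {
            body.put("defaults", defaults);
        }
        return body;
    }

    public static void main(String[] args) {
        System.out.println(render(
                Map.of("cluster.routing.allocation.enable", "all"),
                Map.of(),
                Map.of("search.default_search_timeout", "-1"),
                true));
    }
}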
syncedFlushService.attemptSyncedFlush(indices, indicesOptions, new RestBuilderListener(channel) { + SyncedFlushRequest syncedFlushRequest = new SyncedFlushRequest(Strings.splitStringByCommaToArray(request.param("index"))); + syncedFlushRequest.indicesOptions(indicesOptions); + client.admin().indices().syncedFlush(syncedFlushRequest, new RestBuilderListener(channel) { @Override - public RestResponse buildResponse(IndicesSyncedFlushResult results, XContentBuilder builder) throws Exception { + public RestResponse buildResponse(SyncedFlushResponse results, XContentBuilder builder) throws Exception { builder.startObject(); results.toXContent(builder, request); builder.endObject(); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java index 005b30e6207..bd7e62abf48 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/settings/RestUpdateSettingsAction.java @@ -88,6 +88,6 @@ public class RestUpdateSettingsAction extends BaseRestHandler { } updateSettingsRequest.settings(updateSettings); - client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener(channel)); + client.admin().indices().updateSettings(updateSettingsRequest, new AcknowledgedRestListener<>(channel)); } } diff --git a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 073a4eb5fa8..19bc4478884 100644 --- a/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -62,7 +62,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { NativeScriptFactory scriptFactory = scripts.get(script); if (scriptFactory != null) { return scriptFactory; diff --git a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java index 993c95ad797..41befc9406f 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptEngineService.java @@ -36,7 +36,7 @@ public interface ScriptEngineService extends Closeable { boolean sandboxed(); - Object compile(String script); + Object compile(String script, Map params); ExecutableScript executable(CompiledScript compiledScript, @Nullable Map vars); diff --git a/core/src/main/java/org/elasticsearch/script/ScriptService.java b/core/src/main/java/org/elasticsearch/script/ScriptService.java index 3b91f2d3110..c9e9f9a873d 100644 --- a/core/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/core/src/main/java/org/elasticsearch/script/ScriptService.java @@ -67,6 +67,7 @@ import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -96,9 +97,9 @@ public class ScriptService extends AbstractComponent implements Closeable { private final Map scriptEnginesByLang; private final Map scriptEnginesByExt; - private final ConcurrentMap 
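RestSyncedFlushAction now routes through the indices admin client instead of invoking SyncedFlushService directly, so the REST and transport layers share one action. A sketch of the client-side call under the 2.x-era API used in this hunk; the index name is hypothetical and a connected Client is assumed:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushRequest;
import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.Client;

public class SyncedFlushSketch {
    // Assumes a connected 2.x-era Client; "logs-2015.12" is a made-up index.
    static void flushSynced(Client client) {
        SyncedFlushRequest request = new SyncedFlushRequest("logs-2015.12");
        request.indicesOptions(IndicesOptions.lenientExpandOpen());
        client.admin().indices().syncedFlush(request, new ActionListener<SyncedFlushResponse>() {
            @Override
            public void onResponse(SyncedFlushResponse response) {
                // Shard copies that took the sync marker can later recover by
                // comparing sync_ids instead of copying segment files.
                System.out.println("synced flush finished");
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        });
    }
}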
staticCache = ConcurrentCollections.newConcurrentMap(); + private final ConcurrentMap staticCache = ConcurrentCollections.newConcurrentMap(); - private final Cache cache; + private final Cache cache; private final Path scriptsDirectory; private final ScriptModes scriptModes; @@ -153,7 +154,7 @@ public class ScriptService extends AbstractComponent implements Closeable { this.defaultLang = settings.get(DEFAULT_SCRIPTING_LANGUAGE_SETTING, DEFAULT_LANG); - CacheBuilder cacheBuilder = CacheBuilder.builder(); + CacheBuilder cacheBuilder = CacheBuilder.builder(); if (cacheMaxSize >= 0) { cacheBuilder.setMaximumWeight(cacheMaxSize); } @@ -224,7 +225,7 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { + public CompiledScript compile(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -252,14 +253,14 @@ public class ScriptService extends AbstractComponent implements Closeable { " operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are not supported"); } - return compileInternal(script, headersContext); + return compileInternal(script, headersContext, params); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, * without checking if it can be executed based on settings. */ - public CompiledScript compileInternal(Script script, HasContextAndHeaders context) { + public CompiledScript compileInternal(Script script, HasContextAndHeaders context, Map params) { if (script == null) { throw new IllegalArgumentException("The parameter script (Script) must not be null."); } @@ -277,7 +278,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); if (type == ScriptType.FILE) { - String cacheKey = getCacheKey(scriptEngineService, name, null); + CacheKey cacheKey = new CacheKey(scriptEngineService, name, null, params); //On disk scripts will be loaded into the staticCache by the listener CompiledScript compiledScript = staticCache.get(cacheKey); @@ -299,14 +300,14 @@ public class ScriptService extends AbstractComponent implements Closeable { code = getScriptFromIndex(indexedScript.lang, indexedScript.id, context); } - String cacheKey = getCacheKey(scriptEngineService, type == ScriptType.INLINE ? null : name, code); + CacheKey cacheKey = new CacheKey(scriptEngineService, type == ScriptType.INLINE ? 
null : name, code, params); CompiledScript compiledScript = cache.get(cacheKey); if (compiledScript == null) { //Either an un-cached inline script or indexed script //If the script type is inline the name will be the same as the code for identification in exceptions try { - compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code)); + compiledScript = new CompiledScript(type, name, lang, scriptEngineService.compile(code, params)); } catch (Exception exception) { throw new ScriptException("Failed to compile " + type + " script [" + name + "] using lang [" + lang + "]", exception); } @@ -364,7 +365,7 @@ public class ScriptService extends AbstractComponent implements Closeable { //we don't know yet what the script will be used for, but if all of the operations for this lang with //indexed scripts are disabled, it makes no sense to even compile it. if (isAnyScriptContextEnabled(scriptLang, scriptEngineService, ScriptType.INDEXED)) { - Object compiled = scriptEngineService.compile(template.getScript()); + Object compiled = scriptEngineService.compile(template.getScript(), Collections.emptyMap()); if (compiled == null) { throw new IllegalArgumentException("Unable to parse [" + template.getScript() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); @@ -436,8 +437,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext) { - return executable(compile(script, scriptContext, headersContext), script.getParams()); + public ExecutableScript executable(Script script, ScriptContext scriptContext, HasContextAndHeaders headersContext, Map params) { + return executable(compile(script, scriptContext, headersContext, params), script.getParams()); } /** @@ -450,8 +451,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { - CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current()); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext, Map params) { + CompiledScript compiledScript = compile(script, scriptContext, SearchContext.current(), params); return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript, lookup, script.getParams()); } @@ -491,9 +492,9 @@ public class ScriptService extends AbstractComponent implements Closeable { * {@code ScriptEngineService}'s {@code scriptRemoved} method when the * script has been removed from the cache */ - private class ScriptCacheRemovalListener implements RemovalListener { + private class ScriptCacheRemovalListener implements RemovalListener { @Override - public void onRemoval(RemovalNotification notification) { + public void onRemoval(RemovalNotification notification) { scriptMetrics.onCacheEviction(); for (ScriptEngineService service : scriptEngines) { try { @@ -539,8 +540,8 @@ public class ScriptService extends AbstractComponent implements Closeable { logger.info("compiling script file [{}]", file.toAbsolutePath()); try(InputStreamReader reader = new InputStreamReader(Files.newInputStream(file), StandardCharsets.UTF_8)) { String script = Streams.copyToString(reader); - String cacheKey = 
getCacheKey(engineService, scriptNameExt.v1(), null); - staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script))); + CacheKey cacheKey = new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap()); + staticCache.put(cacheKey, new CompiledScript(ScriptType.FILE, scriptNameExt.v1(), engineService.types()[0], engineService.compile(script, Collections.emptyMap()))); scriptMetrics.onCompilation(); } } else { @@ -565,7 +566,7 @@ public class ScriptService extends AbstractComponent implements Closeable { ScriptEngineService engineService = getScriptEngineServiceForFileExt(scriptNameExt.v2()); assert engineService != null; logger.info("removing script file [{}]", file.toAbsolutePath()); - staticCache.remove(getCacheKey(engineService, scriptNameExt.v1(), null)); + staticCache.remove(new CacheKey(engineService, scriptNameExt.v1(), null, Collections.emptyMap())); } } @@ -625,11 +626,44 @@ public class ScriptService extends AbstractComponent implements Closeable { } } - private static String getCacheKey(ScriptEngineService scriptEngineService, String name, String code) { - String lang = scriptEngineService.types()[0]; - return lang + ":" + (name != null ? ":" + name : "") + (code != null ? ":" + code : ""); + private static final class CacheKey { + final String lang; + final String name; + final String code; + final Map params; + + private CacheKey(final ScriptEngineService service, final String name, final String code, final Map params) { + this.lang = service.types()[0]; + this.name = name; + this.code = code; + this.params = params; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + CacheKey cacheKey = (CacheKey)o; + + if (!lang.equals(cacheKey.lang)) return false; + if (name != null ? !name.equals(cacheKey.name) : cacheKey.name != null) return false; + if (code != null ? !code.equals(cacheKey.code) : cacheKey.code != null) return false; + return params.equals(cacheKey.params); + + } + + @Override + public int hashCode() { + int result = lang.hashCode(); + result = 31 * result + (name != null ? name.hashCode() : 0); + result = 31 * result + (code != null ? 
code.hashCode() : 0); + result = 31 * result + params.hashCode(); + return result; + } } + private static class IndexedScript { private final String lang; private final String id; diff --git a/core/src/main/java/org/elasticsearch/search/SearchService.java b/core/src/main/java/org/elasticsearch/search/SearchService.java index 9501099997f..99f9b0ea0d7 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchService.java +++ b/core/src/main/java/org/elasticsearch/search/SearchService.java @@ -23,6 +23,7 @@ import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; @@ -38,6 +39,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; @@ -70,7 +73,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesWarmer; import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.cache.request.IndicesRequestCache; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; @@ -83,12 +85,16 @@ import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsContext.FieldDataField; import org.elasticsearch.search.fetch.fielddata.FieldDataFieldsFetchSubPhase; import org.elasticsearch.search.fetch.script.ScriptFieldsContext.ScriptField; +import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.internal.*; import org.elasticsearch.search.internal.SearchContext.Lifetime; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.*; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; +import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -109,9 +115,10 @@ public class SearchService extends AbstractLifecycleComponent imp public static final String NORMS_LOADING_KEY = "index.norms.loading"; public static final String DEFAULT_KEEPALIVE_KEY = "search.default_keep_alive"; public static final String KEEPALIVE_INTERVAL_KEY = "search.keep_alive_interval"; - public static final String DEFAULT_SEARCH_TIMEOUT = "search.default_search_timeout"; public static final TimeValue NO_TIMEOUT = timeValueMillis(-1); + public static final Setting DEFAULT_SEARCH_TIMEOUT_SETTING = Setting.timeSetting("search.default_search_timeout", NO_TIMEOUT, true, Setting.Scope.CLUSTER); + private final ThreadPool threadPool; @@ -150,7 +157,7 @@ public class SearchService extends AbstractLifecycleComponent imp private final ParseFieldMatcher parseFieldMatcher; @Inject - public SearchService(Settings settings, NodeSettingsService nodeSettingsService, 
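The ScriptService hunk replaces the old concatenated-string cache key with a CacheKey value object, so the compile-time params take part in equality and two compilations of the same code with different params occupy distinct cache slots. A condensed standalone equivalent using Objects.hash, where the diff spells the same contract out by hand:

import java.util.Map;
import java.util.Objects;

public final class ScriptCacheKey {
    final String lang;
    final String name;                // null for inline scripts
    final String code;                // null for file scripts
    final Map<String, String> params; // compile-time options, never null

    ScriptCacheKey(String lang, String name, String code, Map<String, String> params) {
        this.lang = Objects.requireNonNull(lang);
        this.name = name;
        this.code = code;
        this.params = Objects.requireNonNull(params);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ScriptCacheKey other = (ScriptCacheKey) o;
        return lang.equals(other.lang)
                && Objects.equals(name, other.name)
                && Objects.equals(code, other.code)
                && params.equals(other.params);
    }

    @Override
    public int hashCode() {
        return Objects.hash(lang, name, code, params);
    }
}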
ClusterService clusterService, IndicesService indicesService,IndicesWarmer indicesWarmer, ThreadPool threadPool, + public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer, ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) { super(settings); @@ -184,19 +191,12 @@ public class SearchService extends AbstractLifecycleComponent imp this.indicesWarmer.addListener(new FieldDataWarmer(indicesWarmer)); this.indicesWarmer.addListener(new SearchWarmer()); - defaultSearchTimeout = settings.getAsTime(DEFAULT_SEARCH_TIMEOUT, NO_TIMEOUT); - nodeSettingsService.addListener(new SearchSettingsListener()); + defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); } - class SearchSettingsListener implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - final TimeValue maybeNewDefaultSearchTimeout = settings.getAsTime(SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout); - if (!maybeNewDefaultSearchTimeout.equals(SearchService.this.defaultSearchTimeout)) { - logger.info("updating [{}] from [{}] to [{}]", SearchService.DEFAULT_SEARCH_TIMEOUT, SearchService.this.defaultSearchTimeout, maybeNewDefaultSearchTimeout); - SearchService.this.defaultSearchTimeout = maybeNewDefaultSearchTimeout; - } - } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { + this.defaultSearchTimeout = defaultSearchTimeout; } @Override @@ -549,7 +549,7 @@ public class SearchService extends AbstractLifecycleComponent imp Engine.Searcher engineSearcher = searcher == null ? 
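SearchService now declares search.default_search_timeout as a dynamic Setting and registers a typed consumer on ClusterSettings, replacing the old NodeSettingsService listener that re-read and compared raw settings on every refresh. A toy standalone model of that push-based consumer pattern (the real Setting/ClusterSettings classes carry scopes, parsers, and validation):

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Toy model of the Setting/ClusterSettings consumer pattern: the service
// registers one typed callback, and updated values are pushed to it.
class DynamicSetting<T> {
    final String key;
    final T defaultValue;
    private final List<Consumer<T>> consumers = new ArrayList<>();

    DynamicSetting(String key, T defaultValue) {
        this.key = key;
        this.defaultValue = defaultValue;
    }

    void addSettingsUpdateConsumer(Consumer<T> consumer) { consumers.add(consumer); }

    void publish(T newValue) { consumers.forEach(c -> c.accept(newValue)); }
}

class SearchServiceSketch {
    // Plays the role of DEFAULT_SEARCH_TIMEOUT_SETTING; -1 means "no timeout".
    static final DynamicSetting<Long> DEFAULT_SEARCH_TIMEOUT_MILLIS =
            new DynamicSetting<>("search.default_search_timeout", -1L);

    private volatile long defaultSearchTimeoutMillis = DEFAULT_SEARCH_TIMEOUT_MILLIS.defaultValue;

    SearchServiceSketch() {
        DEFAULT_SEARCH_TIMEOUT_MILLIS.addSettingsUpdateConsumer(this::setDefaultSearchTimeout);
    }

    private void setDefaultSearchTimeout(long millis) { this.defaultSearchTimeoutMillis = millis; }
}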
indexShard.acquireSearcher("search") : searcher; - SearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); + DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher, indexService, indexShard, scriptService, pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, defaultSearchTimeout); SearchContext.setCurrent(context); try { @@ -558,7 +558,7 @@ public class SearchService extends AbstractLifecycleComponent imp context.scrollContext().scroll = request.scroll(); } if (request.template() != null) { - ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context); + ExecutableScript executable = this.scriptService.executable(request.template(), ScriptContext.Standard.SEARCH, context, Collections.emptyMap()); BytesReference run = (BytesReference) executable.run(); try (XContentParser parser = XContentFactory.xContent(run).createParser(run)) { QueryParseContext queryParseContext = new QueryParseContext(indicesService.getIndicesQueryRegistry()); @@ -656,7 +656,7 @@ public class SearchService extends AbstractLifecycleComponent imp } } - private void parseSource(SearchContext context, SearchSourceBuilder source) throws SearchParseException { + private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException { // nothing to parse... if (source == null) { return; @@ -712,6 +712,9 @@ public class SearchService extends AbstractLifecycleComponent imp if (source.minScore() != null) { context.minimumScore(source.minScore()); } + if (source.profile()) { + context.setProfilers(new Profilers(context.searcher())); + } context.timeoutInMillis(source.timeoutInMillis()); context.terminateAfter(source.terminateAfter()); if (source.aggregations() != null) { @@ -807,19 +810,11 @@ public class SearchService extends AbstractLifecycleComponent imp fieldDataFieldsContext.setHitExecutionNeeded(true); } if (source.highlighter() != null) { - XContentParser highlighterParser = null; + HighlightBuilder highlightBuilder = source.highlighter(); try { - highlighterParser = XContentFactory.xContent(source.highlighter()).createParser(source.highlighter()); - this.elementParsers.get("highlight").parse(highlighterParser, context); - } catch (Exception e) { - String sSource = "_na_"; - try { - sSource = source.toString(); - } catch (Throwable e1) { - // ignore - } - XContentLocation location = highlighterParser != null ? 
highlighterParser.getTokenLocation() : null; - throw new SearchParseException(context, "failed to parse suggest source [" + sSource + "]", location, e); + context.highlight(highlightBuilder.build(context.indexShard().getQueryShardContext())); + } catch (IOException e) { + throw new SearchContextException(context, "failed to create SearchContextHighlighter", e); } } if (source.innerHits() != null) { @@ -841,7 +836,7 @@ public class SearchService extends AbstractLifecycleComponent imp } if (source.scriptFields() != null) { for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) { - SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure())); } } diff --git a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java index 1a12751d396..c648436c3a9 100644 --- a/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java +++ b/core/src/main/java/org/elasticsearch/search/SearchShardTarget.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -42,8 +42,8 @@ public class SearchShardTarget implements Streamable, Comparable parentOrdToOtherBuckets; private boolean multipleBucketsPerParentOrd = false; - // This needs to be a Set to avoid duplicate reader context entries via (#setNextReader(...), it can get invoked multiple times with the same reader context) - private Set replay = new LinkedHashSet<>(); - public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Query childFilter, Query parentFilter, ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, @@ -99,17 +97,11 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - if (replay == null) { - throw new IllegalStateException(); - } final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; Scorer parentScorer = parentFilter.scorer(ctx); final Bits parentDocs = Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); - if (childFilter.scorer(ctx) != null) { - replay.add(ctx); - } return new LeafBucketCollector() { @Override @@ -138,10 +130,8 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { @Override protected void doPostCollection() throws IOException { - final Set replay = this.replay; - this.replay = null; - - for (LeafReaderContext ctx : replay) { + IndexReader indexReader = context().searchContext().searcher().getIndexReader(); + for (LeafReaderContext ctx : indexReader.leaves()) { DocIdSetIterator childDocsIter = childFilter.scorer(ctx); if (childDocsIter == null) { continue; diff --git 
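The ParentToChildrenAggregator hunk above drops the replay set: instead of remembering which segments produced child matches during collection, doPostCollection now simply walks every leaf of the searcher's IndexReader and consumes the child iterator where one exists. A schematic of that loop against Lucene types; the ordinal-to-bucket mapping of the real aggregator is elided:

import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;

final class PostCollectionSketch {
    // Stand-in for childFilter.scorer(ctx), which may be null for a segment
    // containing no child documents.
    interface ChildDocs {
        DocIdSetIterator iterator(LeafReaderContext ctx) throws IOException;
    }

    static void replay(IndexReader reader, ChildDocs childDocs) throws IOException {
        for (LeafReaderContext ctx : reader.leaves()) {
            DocIdSetIterator it = childDocs.iterator(ctx);
            if (it == null) {
                continue; // nothing to replay in this segment
            }
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                // here the real aggregator resolves the parent ordinal for
                // 'doc' and collects it into the matching bucket(s)
            }
        }
    }
}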
a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index da3bc286ff9..faca359d766 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -23,7 +23,6 @@ import org.apache.lucene.util.PriorityQueue; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.rounding.Rounding; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -151,7 +150,7 @@ public class InternalHistogram extends Inter @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (formatter != ValueFormatter.RAW) { - Text keyTxt = new StringText(formatter.format(key)); + Text keyTxt = new Text(formatter.format(key)); if (keyed) { builder.startObject(keyTxt.string()); } else { @@ -392,12 +391,14 @@ public class InternalHistogram extends Inter return reducedBuckets; } - private void addEmptyBuckets(List list) { + private void addEmptyBuckets(List list, ReduceContext reduceContext) { B lastBucket = null; ExtendedBounds bounds = emptyBucketInfo.bounds; ListIterator iter = list.listIterator(); // first adding all the empty buckets *before* the actual data (based on th extended_bounds.min the user requested) + InternalAggregations reducedEmptySubAggs = InternalAggregations.reduce(Collections.singletonList(emptyBucketInfo.subAggregations), + reduceContext); if (bounds != null) { B firstBucket = iter.hasNext() ? 
list.get(iter.nextIndex()) : null; if (firstBucket == null) { @@ -405,7 +406,9 @@ public class InternalHistogram extends Inter long key = bounds.min; long max = bounds.max; while (key <= max) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -414,7 +417,9 @@ public class InternalHistogram extends Inter long key = bounds.min; if (key < firstBucket.key) { while (key < firstBucket.key) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, + keyed, formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -429,7 +434,9 @@ public class InternalHistogram extends Inter if (lastBucket != null) { long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); while (key < nextBucket.key) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } assert key == nextBucket.key; @@ -442,7 +449,9 @@ public class InternalHistogram extends Inter long key = emptyBucketInfo.rounding.nextRoundingValue(lastBucket.key); long max = bounds.max; while (key <= max) { - iter.add(getFactory().createBucket(key, 0, emptyBucketInfo.subAggregations, keyed, formatter)); + iter.add(getFactory().createBucket(key, 0, + reducedEmptySubAggs, keyed, + formatter)); key = emptyBucketInfo.rounding.nextRoundingValue(key); } } @@ -454,7 +463,7 @@ public class InternalHistogram extends Inter // adding empty buckets if needed if (minDocCount == 0) { - addEmptyBuckets(reducedBuckets); + addEmptyBuckets(reducedBuckets, reduceContext); } if (order == InternalOrder.KEY_ASC) { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java index 046ca717b9f..b6d1d56d07b 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java @@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -82,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { @Override public void initialize(InternalAggregation.ReduceContext context) { - searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context); + searchScript = context.scriptService().executable(script, ScriptContext.Standard.AGGS, context, Collections.emptyMap()); searchScript.setNextVar("_subset_freq", subsetDfHolder); searchScript.setNextVar("_subset_size", subsetSizeHolder); searchScript.setNextVar("_superset_freq", supersetDfHolder); @@ -170,7 +171,7 @@ public class ScriptHeuristic extends SignificanceHeuristic { } ExecutableScript searchScript; try { - searchScript = scriptService.executable(script, ScriptContext.Standard.AGGS, context); + searchScript = scriptService.executable(script, 
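The InternalHistogram change above reduces the empty-bucket sub-aggregations once and reuses that reduced tree for every gap the rounding produces between real buckets. The gap filling itself is just "advance the key by nextRoundingValue until the next populated bucket"; a standalone sketch with a fixed interval standing in for the Rounding implementation:

import java.util.TreeMap;

public class EmptyBucketSketch {
    // Keys are bucket start values, values are doc counts; inserted zero-count
    // entries all share the single pre-reduced empty sub-aggregation tree.
    static TreeMap<Long, Long> fillGaps(TreeMap<Long, Long> buckets, long interval) {
        TreeMap<Long, Long> filled = new TreeMap<>(buckets);
        Long previous = null;
        for (long key : buckets.keySet()) {
            if (previous != null) {
                for (long k = previous + interval; k < key; k += interval) {
                    filled.put(k, 0L);
                }
            }
            previous = key;
        }
        return filled;
    }

    public static void main(String[] args) {
        TreeMap<Long, Long> buckets = new TreeMap<>();
        buckets.put(0L, 3L);
        buckets.put(40L, 1L); // keys 10, 20, 30 become empty buckets
        System.out.println(fillGaps(buckets, 10)); // {0=3, 10=0, 20=0, 30=0, 40=1}
    }
}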
ScriptContext.Standard.AGGS, context, Collections.emptyMap()); } catch (Exception e) { throw new ElasticsearchParseException("failed to parse [{}] significance heuristic. the script [{}] could not be loaded", e, script, heuristicName); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java index 26c2eee2f6b..c270517cd9d 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java @@ -91,7 +91,7 @@ public class LongTerms extends InternalTerms { @Override public String getKeyAsString() { - return String.valueOf(term); + return formatter.format(term); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java index d39a0335ac3..00c6b6b49bb 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java @@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -91,7 +92,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implement vars.putAll(firstAggregation.reduceScript.getParams()); } CompiledScript compiledScript = reduceContext.scriptService().compile(firstAggregation.reduceScript, - ScriptContext.Standard.AGGS, reduceContext); + ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); ExecutableScript script = reduceContext.scriptService().executable(compiledScript, vars); aggregation = script.run(); } else { diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java index 2c1caaa5241..6603c6289b2 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,11 +59,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator { this.params = params; ScriptService scriptService = context.searchContext().scriptService(); if (initScript != null) { - scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext()).run(); + scriptService.executable(initScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()).run(); } - this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS); + this.mapScript = scriptService.search(context.searchContext().lookup(), mapScript, ScriptContext.Standard.AGGS, Collections.emptyMap()); if (combineScript != null) { - this.combineScript = scriptService.executable(combineScript, 
ScriptContext.Standard.AGGS, context.searchContext()); + this.combineScript = scriptService.executable(combineScript, ScriptContext.Standard.AGGS, context.searchContext(), Collections.emptyMap()); } else { this.combineScript = null; } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java index 32b5d7390d2..1efd4a7cd24 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.tophits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -194,7 +193,7 @@ public class TopHitsBuilder extends AbstractAggregationBuilder { return sourceBuilder; } - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java index 789f8c961a3..e5ccbf6971a 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketscript/BucketScriptPipelineAggregator.java @@ -90,7 +90,7 @@ public class BucketScriptPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java index 669a223b215..edc3b4e87ce 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/pipeline/having/BucketSelectorPipelineAggregator.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -88,7 +89,7 @@ public class BucketSelectorPipelineAggregator extends PipelineAggregator { InternalMultiBucketAggregation originalAgg = (InternalMultiBucketAggregation) aggregation; List buckets = originalAgg.getBuckets(); - CompiledScript compiledScript = reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext); + CompiledScript compiledScript = 
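Across these aggregation hunks, every call site without compile-time options now passes Collections.emptyMap() into the widened compile/executable/search signatures rather than null. The idiom, shown against a stand-in single-method interface (the real ScriptService methods take additional context arguments):

import java.util.Collections;
import java.util.Map;

public class CompileParamsSketch {
    // Stand-in for the widened ScriptEngineService.compile(String, Map) signature.
    interface Engine {
        Object compile(String script, Map<String, String> params);
    }

    public static void main(String[] args) {
        Engine engine = (script, params) -> "compiled[" + script + ", opts=" + params + "]";
        // Call sites with no compiler options pass an immutable empty map
        // instead of null, so engine implementations never need a null check.
        System.out.println(engine.compile("doc['price'].value * 2", Collections.emptyMap()));
    }
}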
reduceContext.scriptService().compile(script, ScriptContext.Standard.AGGS, reduceContext, Collections.emptyMap()); List newBuckets = new ArrayList<>(); for (Bucket bucket : buckets) { Map vars = new HashMap<>(); diff --git a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index 506c9d16d7c..a9dcc77ee9f 100644 --- a/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/core/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -43,6 +43,7 @@ import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTimeZone; import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -227,7 +228,7 @@ public class ValuesSourceParser { } private SearchScript createScript() { - return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS); + return input.script == null ? null : context.scriptService().search(context.lookup(), input.script, ScriptContext.Standard.AGGS, Collections.emptyMap()); } private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { diff --git a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 7963b678fb3..3ea2d604b8a 100644 --- a/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -22,6 +22,7 @@ package org.elasticsearch.search.builder; import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.elasticsearch.Version; import org.elasticsearch.action.support.ToXContentToBytes; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -91,6 +92,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ public static final ParseField RESCORE_FIELD = new ParseField("rescore"); public static final ParseField STATS_FIELD = new ParseField("stats"); public static final ParseField EXT_FIELD = new ParseField("ext"); + public static final ParseField PROFILE_FIELD = new ParseField("profile"); private static final SearchSourceBuilder PROTOTYPE = new SearchSourceBuilder(); @@ -144,7 +146,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private List aggregations; - private BytesReference highlightBuilder; + private HighlightBuilder highlightBuilder; private BytesReference suggestBuilder; @@ -158,6 +160,9 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ private BytesReference ext = null; + private boolean profile = false; + + /** * Constructs a new search source builder. */ @@ -405,22 +410,14 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ * Adds highlight to perform as part of the search. 
*/ public SearchSourceBuilder highlighter(HighlightBuilder highlightBuilder) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - highlightBuilder.innerXContent(builder); - builder.endObject(); - this.highlightBuilder = builder.bytes(); - return this; - } catch (IOException e) { - throw new RuntimeException(e); - } + this.highlightBuilder = highlightBuilder; + return this; } /** - * Gets the bytes representing the hightlighter builder for this request. + * Gets the highlighter builder for this request. */ - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return highlightBuilder; } @@ -483,6 +480,22 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ return this; } + /** + * Whether the query should be profiled. Defaults to {@code false}. + */ + public SearchSourceBuilder profile(boolean profile) { + this.profile = profile; + return this; + } + + /** + * Return whether to profile query execution. + * Defaults to {@code false}. + */ + public boolean profile() { + return profile; + } + /** * Gets the bytes representing the rescore builders for this request. */ @@ -731,6 +744,8 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ builder.fieldNames = fieldNames; } else if (context.parseFieldMatcher().match(currentFieldName, SORT_FIELD)) { builder.sort(parser.text()); + } else if (context.parseFieldMatcher().match(currentFieldName, PROFILE_FIELD)) { + builder.profile = parser.booleanValue(); } else { throw new ParsingException(parser.getTokenLocation(), "Unknown key for a " + token + " in [" + currentFieldName + "].", parser.getTokenLocation()); @@ -813,8 +828,7 @@ } builder.aggregations = aggregations; } else if (context.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FIELD)) { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); - builder.highlightBuilder = xContentBuilder.bytes(); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context); } else if (context.parseFieldMatcher().match(currentFieldName, INNER_HITS_FIELD)) { XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().copyCurrentStructure(parser); builder.innerHitsBuilder = xContentBuilder.bytes(); @@ -940,6 +954,10 @@ builder.field(EXPLAIN_FIELD.getPreferredName(), explain); } + if (profile) { + builder.field("profile", true); + } + if (fetchSourceContext != null) { builder.field(_SOURCE_FIELD.getPreferredName(), fetchSourceContext); } @@ -1012,10 +1030,7 @@ } if (highlightBuilder != null) { - builder.field(HIGHLIGHT_FIELD.getPreferredName()); - XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(highlightBuilder); - parser.nextToken(); - builder.copyCurrentStructure(parser); + this.highlightBuilder.toXContent(builder, params); } if (innerHitsBuilder != null) { @@ -1158,7 +1173,7 @@ } builder.from = in.readVInt(); if (in.readBoolean()) { - builder.highlightBuilder = in.readBytesReference(); + builder.highlightBuilder = HighlightBuilder.PROTOTYPE.readFrom(in); } boolean hasIndexBoost = in.readBoolean(); if (hasIndexBoost) { @@ -1224,6 +1239,11 @@ public final class SearchSourceBuilder
extends ToXContentToBytes implements Writ if (in.readBoolean()) { builder.ext = in.readBytesReference(); } + if (in.getVersion().onOrAfter(Version.V_2_2_0)) { + builder.profile = in.readBoolean(); + } else { + builder.profile = false; + } return builder; } @@ -1259,7 +1279,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ boolean hasHighlightBuilder = highlightBuilder != null; out.writeBoolean(hasHighlightBuilder); if (hasHighlightBuilder) { - out.writeBytesReference(highlightBuilder); + highlightBuilder.writeTo(out); } boolean hasIndexBoost = indexBoost != null; out.writeBoolean(hasIndexBoost); @@ -1337,13 +1357,16 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ if (hasExt) { out.writeBytesReference(ext); } + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + out.writeBoolean(profile); + } } @Override public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, fieldDataFields, fieldNames, from, highlightBuilder, indexBoost, innerHitsBuilder, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, - size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version); + size, sorts, stats, suggestBuilder, terminateAfter, timeoutInMillis, trackScores, version, profile); } @Override @@ -1376,6 +1399,7 @@ public final class SearchSourceBuilder extends ToXContentToBytes implements Writ && Objects.equals(terminateAfter, other.terminateAfter) && Objects.equals(timeoutInMillis, other.timeoutInMillis) && Objects.equals(trackScores, other.trackScores) - && Objects.equals(version, other.version); + && Objects.equals(version, other.version) + && Objects.equals(profile, other.profile); } } diff --git a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index f76527163cb..835e6e71425 100644 --- a/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/core/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -43,7 +43,6 @@ import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; @@ -52,9 +51,11 @@ import org.elasticsearch.search.fetch.FetchSearchResultProvider; import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.profile.InternalProfileShardResults; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.query.QuerySearchResultProvider; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.search.profile.ProfileShardResult; import java.io.IOException; import java.util.ArrayList; @@ -410,6 +411,17 @@ public class SearchPhaseController extends AbstractComponent { } } + //Collect profile results + InternalProfileShardResults shardResults = null; + if (!queryResults.isEmpty() && firstResult.profileResults() != null) { + Map> 
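Callers opt in via SearchSourceBuilder.profile(true), and the stream read/write above is gated on Version.V_2_2_0 so a mixed-version cluster neither sends the flag to an older node nor expects it from one. A schematic of that version-gate pattern with stand-in types; the version constant is illustrative, not the real org.elasticsearch.Version id:

import java.io.IOException;

final class VersionGateSketch {
    // Illustrative version id, not the real Version encoding.
    static final int V_2_2_0 = 2020099;

    interface Out { int version(); void writeBoolean(boolean b) throws IOException; }
    interface In  { int version(); boolean readBoolean() throws IOException; }

    // Mirrors: if (out.getVersion().onOrAfter(Version.V_2_2_0)) out.writeBoolean(profile);
    static void write(Out out, boolean profile) throws IOException {
        if (out.version() >= V_2_2_0) {
            out.writeBoolean(profile);
        }
    }

    // Mirrors the read side: an older peer never sent the flag, so fall back
    // to the default instead of reading a byte that is not on the wire.
    static boolean read(In in) throws IOException {
        if (in.version() >= V_2_2_0) {
            return in.readBoolean();
        }
        return false;
    }
}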
profileResults = new HashMap<>(queryResults.size()); + for (AtomicArray.Entry entry : queryResults) { + String key = entry.value.queryResult().shardTarget().toString(); + profileResults.put(key, entry.value.queryResult().profileResults()); + } + shardResults = new InternalProfileShardResults(profileResults); + } + if (aggregations != null) { List pipelineAggregators = firstResult.pipelineAggregators(); if (pipelineAggregators != null) { @@ -427,7 +439,7 @@ public class SearchPhaseController extends AbstractComponent { InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); - return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly); + return new InternalSearchResponse(searchHits, aggregations, suggest, shardResults, timedOut, terminatedEarly); } } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index 227141e4ddf..04890700be8 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.regex.Regex; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -198,7 +198,7 @@ public class FetchPhase implements SearchPhase { DocumentMapper documentMapper = context.mapperService().documentMapper(fieldsVisitor.uid().type()); Text typeText; if (documentMapper == null) { - typeText = new StringAndBytesText(fieldsVisitor.uid().type()); + typeText = new Text(fieldsVisitor.uid().type()); } else { typeText = documentMapper.typeText(); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java index 7941e177750..2e76a4c3703 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsBuilder.java @@ -20,7 +20,6 @@ package org.elasticsearch.search.fetch.innerhits; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -266,7 +265,7 @@ public class InnerHitsBuilder implements ToXContent { return this; } - public BytesReference highlighter() { + public HighlightBuilder highlighter() { return sourceBuilder().highlighter(); } diff --git a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index 6dbdcbd589a..de1703b5c98 100644 --- a/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/core/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -30,6 +30,7 @@ import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchParseException; import 
org.elasticsearch.search.internal.SearchContext; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -97,9 +98,9 @@ public class ScriptFieldsParseElement implements SearchParseElement { throw new SearchParseException(context, "must specify a script in script fields", parser.getTokenLocation()); } - SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException)); } } } -} \ No newline at end of file +} diff --git a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java index d30144f777f..b4de465cc74 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/AbstractHighlighterBuilder.java @@ -22,13 +22,19 @@ package org.elasticsearch.search.highlight; import org.apache.lucene.search.highlight.SimpleFragmenter; import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryParseContext; +import org.elasticsearch.search.highlight.HighlightBuilder.Order; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; +import java.util.List; import java.util.Map; import java.util.Objects; @@ -74,7 +80,7 @@ public abstract class AbstractHighlighterBuilder highlightQuery; - protected String order; + protected Order order; protected Boolean highlightFilter; @@ -213,18 +219,26 @@ public abstract class AbstractHighlighterBuilderscore, which then it will be ordered - * by score of the fragments. + * by score of the fragments, or none. + */ + public HB order(String order) { + return order(Order.fromString(order)); + } + + /** + * By default, fragments of a field are ordered by the order in the highlighted text. + * If set to {@link Order#SCORE}, this changes order to score of the fragments. 
*/ @SuppressWarnings("unchecked") - public HB order(String order) { - this.order = order; + public HB order(Order scoreOrdered) { + this.order = scoreOrdered; return (HB) this; } /** - * @return the value set by {@link #order(String)} + * @return the value set by {@link #order(Order)} */ - public String order() { + public Order order() { return this.order; } @@ -391,7 +405,7 @@ public abstract class AbstractHighlighterBuilder preTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + preTagsList.add(parser.text()); + } + highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { + List postTagsList = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + postTagsList.add(parser.text()); + } + highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); + } + } else if (token.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { + highlightBuilder.order(Order.fromString(parser.text())); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { + highlightBuilder.highlightFilter(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { + highlightBuilder.fragmentSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { + highlightBuilder.numOfFragments(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { + highlightBuilder.requireFieldMatch(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { + highlightBuilder.boundaryMaxScan(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { + highlightBuilder.boundaryChars(parser.text().toCharArray()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { + highlightBuilder.highlighterType(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { + highlightBuilder.fragmenter(parser.text()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { + highlightBuilder.noMatchSize(parser.intValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { + highlightBuilder.forceSource(parser.booleanValue()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { + highlightBuilder.phraseLimit(parser.intValue()); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); + } + } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { + if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { + highlightBuilder.options(parser.map()); + } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { + 
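In AbstractHighlighterBuilder the order field moves from a free-form String to the typed HighlightBuilder.Order enum, with the old String setter kept as a thin delegate through Order.fromString. A standalone sketch of that enum shape (the real one also handles stream serialization):

import java.util.Locale;

enum Order {
    NONE, SCORE;

    // Lenient parse for user input such as "score" in the highlight source.
    static Order fromString(String order) {
        if (order == null) {
            throw new IllegalArgumentException("order must not be null");
        }
        return valueOf(order.toUpperCase(Locale.ROOT));
    }

    @Override
    public String toString() {
        return name().toLowerCase(Locale.ROOT); // rendered back to JSON lowercase
    }
}

class OrderSketch {
    public static void main(String[] args) {
        System.out.println(Order.fromString("score")); // prints "score"
    }
}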
highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + } else if (false == highlightBuilder.doFromXContent(parseContext, currentFieldName, token)) { + throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); + } + } else if (currentFieldName != null) { + throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + } + } + + if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { + throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set"); + } + return highlightBuilder; + } + + /** + * @param parser the input parser. Implementing classes might advance the parser depending on the + * information they need to instantiate a new instance + * @return a new instance + */ + protected abstract HB createInstance(XContentParser parser) throws IOException; + + /** + * Implementing subclasses can handle parsing special options depending on the + * current token, field name and the parse context. + * @return true if an option was found and successfully parsed, otherwise false + */ + protected abstract boolean doFromXContent(QueryParseContext parseContext, String currentFieldName, XContentParser.Token endMarkerToken) throws IOException; + @Override public final int hashCode() { return Objects.hash(getClass(), Arrays.hashCode(preTags), Arrays.hashCode(postTags), fragmentSize, @@ -480,7 +588,9 @@ public abstract class AbstractHighlighterBuilder 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -170,7 +170,7 @@ public class FastVectorHighlighter implements Highlighter { fragments = entry.fragmentsBuilder.createFragments(hitContext.reader(), hitContext.docId(), mapper.fieldType().names().indexName(), fieldFragList, 1, field.fieldOptions().preTags(), field.fieldOptions().postTags(), encoder); if (fragments != null && fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java index e45303ccb58..c0b1aeea3be 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightBuilder.java @@ -30,11 +30,13 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParser.Token; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions; import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions.Builder; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -42,6 +44,7 @@ import 
java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Objects; import java.util.Set; @@ -230,117 +233,45 @@ public class HighlightBuilder extends AbstractHighlighterBuilder preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - highlightBuilder.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, POST_TAGS_FIELD)) { - List postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - postTagsList.add(parser.text()); - } - highlightBuilder.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - highlightBuilder.useExplicitFieldOrder(true); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - String highlightFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - if (highlightFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "If highlighter fields is an array it must contain objects containing a single field"); - } - } - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", topLevelFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, ORDER_FIELD)) { - highlightBuilder.order(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TAGS_SCHEMA_FIELD)) { - highlightBuilder.tagsSchema(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_FILTER_FIELD)) { - highlightBuilder.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENT_SIZE_FIELD)) { - highlightBuilder.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - highlightBuilder.numOfFragments(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, ENCODER_FIELD)) { - highlightBuilder.encoder(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - highlightBuilder.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - highlightBuilder.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, BOUNDARY_CHARS_FIELD)) { - highlightBuilder.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, TYPE_FIELD)) { - highlightBuilder.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FRAGMENTER_FIELD)) { - highlightBuilder.fragmenter(parser.text()); - } else 
if (parseContext.parseFieldMatcher().match(topLevelFieldName, NO_MATCH_SIZE_FIELD)) { - highlightBuilder.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FORCE_SOURCE_FIELD)) { - highlightBuilder.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, PHRASE_LIMIT_FIELD)) { - highlightBuilder.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", topLevelFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && topLevelFieldName != null) { - if (parseContext.parseFieldMatcher().match(topLevelFieldName, OPTIONS_FIELD)) { - highlightBuilder.options(parser.map()); - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, FIELDS_FIELD)) { - String highlightFieldName = null; + boolean foundCurrentFieldMatch = false; + if (currentToken.isValue()) { + if (parseContext.parseFieldMatcher().match(currentFieldName, TAGS_SCHEMA_FIELD)) { + tagsSchema(parser.text()); + foundCurrentFieldMatch = true; + } else if (parseContext.parseFieldMatcher().match(currentFieldName, ENCODER_FIELD)) { + encoder(parser.text()); + foundCurrentFieldMatch = true; + } + } else if (currentToken == Token.START_ARRAY && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + useExplicitFieldOrder(true); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { - highlightFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - highlightBuilder.field(Field.fromXContent(highlightFieldName, parseContext)); + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); } } - } else if (parseContext.parseFieldMatcher().match(topLevelFieldName, HIGHLIGHT_QUERY_FIELD)) { - highlightBuilder.highlightQuery(parseContext.parseInnerQueryBuilder()); + foundCurrentFieldMatch = true; } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", topLevelFieldName); + throw new ParsingException(parser.getTokenLocation(), + "If highlighter fields is an array it must contain objects containing a single field"); } - } else if (topLevelFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, topLevelFieldName); } + } else if (currentToken == Token.START_OBJECT && parseContext.parseFieldMatcher().match(currentFieldName, FIELDS_FIELD)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + field(HighlightBuilder.Field.PROTOTYPE.fromXContent(parseContext)); + } + } + foundCurrentFieldMatch = true; } - - if (highlightBuilder.preTags() != null && highlightBuilder.postTags() == null) { - throw new ParsingException(parser.getTokenLocation(), "Highlighter global preTags are set, but global postTags are not set"); - } - return highlightBuilder; + return foundCurrentFieldMatch; } public SearchContextHighlight build(QueryShardContext context) throws IOException { @@ -378,9 +309,15 @@ public class HighlightBuilder extends AbstractHighlighterBuilder preTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - preTagsList.add(parser.text()); - } - 
field.preTags(preTagsList.toArray(new String[preTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, POST_TAGS_FIELD)) { - List postTagsList = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - postTagsList.add(parser.text()); - } - field.postTags(postTagsList.toArray(new String[postTagsList.size()])); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD)) { - List matchedFields = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - matchedFields.add(parser.text()); - } - field.matchedFields(matchedFields.toArray(new String[matchedFields.size()])); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse array with name [{}]", currentFieldName); - } - } else if (token.isValue()) { - if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_SIZE_FIELD)) { - field.fragmentSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NUMBER_OF_FRAGMENTS_FIELD)) { - field.numOfFragments(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD)) { - field.fragmentOffset(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_FILTER_FIELD)) { - field.highlightFilter(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, ORDER_FIELD)) { - field.order(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, REQUIRE_FIELD_MATCH_FIELD)) { - field.requireFieldMatch(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_MAX_SCAN_FIELD)) { - field.boundaryMaxScan(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, BOUNDARY_CHARS_FIELD)) { - field.boundaryChars(parser.text().toCharArray()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, TYPE_FIELD)) { - field.highlighterType(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENTER_FIELD)) { - field.fragmenter(parser.text()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, NO_MATCH_SIZE_FIELD)) { - field.noMatchSize(parser.intValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, FORCE_SOURCE_FIELD)) { - field.forceSource(parser.booleanValue()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, PHRASE_LIMIT_FIELD)) { - field.phraseLimit(parser.intValue()); - } else { - throw new ParsingException(parser.getTokenLocation(), "unexpected fieldname [{}]", currentFieldName); - } - } else if (token == XContentParser.Token.START_OBJECT && currentFieldName != null) { - if (parseContext.parseFieldMatcher().match(currentFieldName, HIGHLIGHT_QUERY_FIELD)) { - field.highlightQuery(parseContext.parseInnerQueryBuilder()); - } else if (parseContext.parseFieldMatcher().match(currentFieldName, OPTIONS_FIELD)) { - field.options(parser.map()); - } else { - throw new ParsingException(parser.getTokenLocation(), "cannot parse object with name [{}]", currentFieldName); - } - } else if (currentFieldName != null) { - throw new ParsingException(parser.getTokenLocation(), "unexpected token [{}] after [{}]", token, currentFieldName); + boolean foundCurrentFieldMatch = false; + if (parseContext.parseFieldMatcher().match(currentFieldName, FRAGMENT_OFFSET_FIELD) && 
currentToken.isValue()) { + fragmentOffset(parser.intValue()); + foundCurrentFieldMatch = true; + } else if (parseContext.parseFieldMatcher().match(currentFieldName, MATCHED_FIELDS_FIELD) + && currentToken == XContentParser.Token.START_ARRAY) { + List matchedFields = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + matchedFields.add(parser.text()); } + matchedFields(matchedFields.toArray(new String[matchedFields.size()])); + foundCurrentFieldMatch = true; + } + return foundCurrentFieldMatch; + } + + @Override + protected Field createInstance(XContentParser parser) throws IOException { + if (parser.currentToken() == XContentParser.Token.FIELD_NAME) { + String fieldname = parser.currentName(); + return new Field(fieldname); + } else { + throw new ParsingException(parser.getTokenLocation(), "unknown token type [{}], expected field name", parser.currentToken()); } - return field; } @Override @@ -654,4 +560,36 @@ public class HighlightBuilder extends AbstractHighlighterBuilder { + NONE, SCORE; + + static Order PROTOTYPE = NONE; + + @Override + public Order readFrom(StreamInput in) throws IOException { + int ordinal = in.readVInt(); + if (ordinal < 0 || ordinal >= values().length) { + throw new IOException("Unknown Order ordinal [" + ordinal + "]"); + } + return values()[ordinal]; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(this.ordinal()); + } + + public static Order fromString(String order) { + if (order.toUpperCase(Locale.ROOT).equals(SCORE.name())) { + return Order.SCORE; + } + return NONE; + } + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } } diff --git a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java index 9077278d515..30530b697f3 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/HighlightField.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.highlight; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import java.io.IOException; @@ -90,7 +89,7 @@ public class HighlightField implements Streamable { if (in.readBoolean()) { int size = in.readVInt(); if (size == 0) { - fragments = StringText.EMPTY_ARRAY; + fragments = Text.EMPTY_ARRAY; } else { fragments = new Text[size]; for (int i = 0; i < size; i++) { diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java index 041ed754d76..5f4cdddb060 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java @@ -33,9 +33,7 @@ import org.apache.lucene.search.highlight.SimpleSpanFragmenter; import org.apache.lucene.search.highlight.TextFragment; import org.apache.lucene.util.BytesRefHash; import org.apache.lucene.util.CollectionUtil; -import org.apache.lucene.util.IOUtils; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import 
org.elasticsearch.search.fetch.FetchPhaseExecutionException; @@ -158,7 +156,7 @@ public class PlainHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } int noMatchSize = highlighterContext.field.fieldOptions().noMatchSize(); @@ -172,7 +170,7 @@ public class PlainHighlighter implements Highlighter { throw new FetchPhaseExecutionException(context, "Failed to highlight field [" + highlighterContext.fieldName + "]", e); } if (end > 0) { - return new HighlightField(highlighterContext.fieldName, new Text[] { new StringText(fieldContents.substring(0, end)) }); + return new HighlightField(highlighterContext.fieldName, new Text[] { new Text(fieldContents.substring(0, end)) }); } } return null; diff --git a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java index e11840e89e7..2509f95da59 100644 --- a/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java +++ b/core/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java @@ -28,7 +28,7 @@ import org.apache.lucene.search.postingshighlight.CustomSeparatorBreakIterator; import org.apache.lucene.search.postingshighlight.Snippet; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.search.fetch.FetchPhaseExecutionException; import org.elasticsearch.search.fetch.FetchSubPhase; @@ -122,7 +122,7 @@ public class PostingsHighlighter implements Highlighter { } if (fragments.length > 0) { - return new HighlightField(highlighterContext.fieldName, StringText.convertFromStringArray(fragments)); + return new HighlightField(highlighterContext.fieldName, Text.convertFromStringArray(fragments)); } return null; diff --git a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index 0a9b860edb7..a7bacb64d94 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -26,6 +26,9 @@ import org.apache.lucene.search.*; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.search.dfs.AggregatedDfs; +import org.elasticsearch.search.profile.ProfileBreakdown; +import org.elasticsearch.search.profile.ProfileWeight; +import org.elasticsearch.search.profile.Profiler; import java.io.IOException; @@ -43,26 +46,44 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { private final Engine.Searcher engineSearcher; - public ContextIndexSearcher(SearchContext searchContext, Engine.Searcher searcher) { + // TODO revisit moving the profiler to inheritance or wrapping model in the future + private Profiler profiler; + + public ContextIndexSearcher(Engine.Searcher searcher, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) { super(searcher.reader()); in = searcher.searcher(); engineSearcher = searcher; setSimilarity(searcher.searcher().getSimilarity(true)); - 
setQueryCache(searchContext.getQueryCache()); - setQueryCachingPolicy(searchContext.indexShard().getQueryCachingPolicy()); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); } @Override public void close() { } + public void setProfiler(Profiler profiler) { + this.profiler = profiler; + } + public void setAggregatedDfs(AggregatedDfs aggregatedDfs) { this.aggregatedDfs = aggregatedDfs; } @Override public Query rewrite(Query original) throws IOException { - return in.rewrite(original); + if (profiler != null) { + profiler.startRewriteTime(); + } + + try { + return in.rewrite(original); + } finally { + if (profiler != null) { + profiler.stopAndAddRewriteTime(); + } + } } @Override @@ -72,8 +93,34 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable { if (aggregatedDfs != null && needsScores) { // if scores are needed and we have dfs data then use it return super.createNormalizedWeight(query, needsScores); + } else if (profiler != null) { + // we need to use the createWeight method to insert the wrappers + return super.createNormalizedWeight(query, needsScores); + } else { + return in.createNormalizedWeight(query, needsScores); + } + } + + @Override + public Weight createWeight(Query query, boolean needsScores) throws IOException { + if (profiler != null) { + // createWeight() is called for each query in the tree, so we tell the queryProfiler + // each invocation so that it can build an internal representation of the query + // tree + ProfileBreakdown profile = profiler.getQueryBreakdown(query); + profile.startTime(ProfileBreakdown.TimingType.CREATE_WEIGHT); + final Weight weight; + try { + weight = super.createWeight(query, needsScores); + } finally { + profile.stopAndRecordTime(); + profiler.pollLastQuery(); + } + return new ProfileWeight(query, weight, profile); + } else { + // needs to be 'super', not 'in' in order to use aggregated DFS + return super.createWeight(query, needsScores); } - return in.createNormalizedWeight(query, needsScores); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java index 1174fcdd8a9..2d3f6590629 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java @@ -58,6 +58,8 @@ import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.search.query.QueryPhaseExecutionException; +import org.elasticsearch.search.profile.Profiler; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -129,10 +131,10 @@ public class DefaultSearchContext extends SearchContext { private List rescore; private SearchLookup searchLookup; private volatile long keepAlive; - private ScoreDoc lastEmittedDoc; private final long originNanoTime = System.nanoTime(); private volatile long lastAccessTime = -1; private InnerHitsContext innerHitsContext; + private Profilers profilers; private final Map subPhaseContexts = new HashMap<>(); private final Map, Collector> queryCollectors = new HashMap<>(); @@ -158,7 +160,7 @@ public class DefaultSearchContext extends SearchContext { 
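The rewrite() override above follows a start/stop-in-finally timing pattern so the rewrite time is recorded even when the delegated call throws. A self-contained sketch of that pattern (illustrative names only; the patch uses Profiler and ProfileBreakdown for the real bookkeeping):

---------------------------------------------------------------------------
// Minimal sketch of the rewrite-timing pattern: start the clock, delegate,
// and stop in a finally block so failures are still accounted for.
public class RewriteTimingSketch {
    private long rewriteTime;
    private long rewriteScratch;

    void startRewriteTime() {
        rewriteScratch = System.nanoTime();
    }

    void stopAndAddRewriteTime() {
        rewriteTime += Math.max(1, System.nanoTime() - rewriteScratch);
        rewriteScratch = 0;
    }

    String rewrite(String original) {
        startRewriteTime();
        try {
            return original.trim(); // stands in for in.rewrite(original)
        } finally {
            stopAndAddRewriteTime();
        }
    }
}
---------------------------------------------------------------------------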
this.fetchResult = new FetchSearchResult(id, shardTarget); this.indexShard = indexShard; this.indexService = indexService; - this.searcher = new ContextIndexSearcher(this, engineSearcher); + this.searcher = new ContextIndexSearcher(engineSearcher, indexService.cache().query(), indexShard.getQueryCachingPolicy()); this.timeEstimateCounter = timeEstimateCounter; this.timeoutInMillis = timeout.millis(); } @@ -724,5 +726,11 @@ public class DefaultSearchContext extends SearchContext { } @Override - public QueryCache getQueryCache() { return indexService.cache().query();} + public Profilers getProfilers() { + return profilers; + } + + public void setProfilers(Profilers profilers) { + this.profilers = profilers; + } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 7225c7b32bd..1f04d013401 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.ParseFieldMatcher; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -49,6 +48,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -517,8 +517,11 @@ public abstract class FilteredSearchContext extends SearchContext { } @Override - public Map, Collector> queryCollectors() { return in.queryCollectors();} + public Profilers getProfilers() { + return in.getProfilers(); + } @Override - public QueryCache getQueryCache() { return in.getQueryCache();} + public Map, Collector> queryCollectors() { return in.queryCollectors();} + } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java index 96fd103fa6f..fcac5b1cc8b 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchHit.java @@ -30,7 +30,7 @@ import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -104,14 +104,14 @@ public class InternalSearchHit implements SearchHit { public InternalSearchHit(int docId, String id, Text type, Map fields) { this.docId = docId; - this.id = new StringAndBytesText(id); + 
this.id = new Text(id); this.type = type; this.fields = fields; } public InternalSearchHit(int nestedTopDocId, String id, Text type, InternalNestedIdentity nestedIdentity, Map fields) { this.docId = nestedTopDocId; - this.id = new StringAndBytesText(id); + this.id = new Text(id); this.type = type; this.nestedIdentity = nestedIdentity; this.fields = fields; @@ -339,7 +339,7 @@ public class InternalSearchHit implements SearchHit { if (sortValues != null) { for (int i = 0; i < sortValues.length; i++) { if (sortValues[i] instanceof BytesRef) { - sortValuesCopy[i] = new StringAndBytesText(new BytesArray((BytesRef) sortValues[i])); + sortValuesCopy[i] = new Text(new BytesArray((BytesRef) sortValues[i])); } } } @@ -783,7 +783,7 @@ public class InternalSearchHit implements SearchHit { private InternalNestedIdentity child; public InternalNestedIdentity(String field, int offset, InternalNestedIdentity child) { - this.field = new StringAndBytesText(field); + this.field = new Text(field); this.offset = offset; this.child = child; } diff --git a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java index 7b73772f9da..b8255e0bb52 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ b/core/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java @@ -28,9 +28,14 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.profile.InternalProfileShardResults; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHits; @@ -40,7 +45,7 @@ import static org.elasticsearch.search.internal.InternalSearchHits.readSearchHit public class InternalSearchResponse implements Streamable, ToXContent { public static InternalSearchResponse empty() { - return new InternalSearchResponse(InternalSearchHits.empty(), null, null, false, null); + return new InternalSearchResponse(InternalSearchHits.empty(), null, null, null, false, null); } private InternalSearchHits hits; @@ -49,6 +54,8 @@ public class InternalSearchResponse implements Streamable, ToXContent { private Suggest suggest; + private InternalProfileShardResults profileResults; + private boolean timedOut; private Boolean terminatedEarly = null; @@ -56,10 +63,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { private InternalSearchResponse() { } - public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, boolean timedOut, Boolean terminatedEarly) { + public InternalSearchResponse(InternalSearchHits hits, InternalAggregations aggregations, Suggest suggest, + InternalProfileShardResults profileResults, boolean timedOut, Boolean terminatedEarly) { this.hits = hits; this.aggregations = aggregations; this.suggest = suggest; + this.profileResults = profileResults; this.timedOut = timedOut; this.terminatedEarly = terminatedEarly; } @@ -84,6 +93,19 @@ public class InternalSearchResponse implements Streamable, ToXContent { return suggest; } + /** + * Returns the profile results for this search 
response (including all shards). + * An empty map is returned if profiling was not enabled + * + * @return Profile results + */ + public Map<String, List<ProfileShardResult>> profile() { + if (profileResults == null) { + return Collections.emptyMap(); + } + return profileResults.getShardResults(); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { hits.toXContent(builder, params); @@ -93,6 +115,9 @@ public class InternalSearchResponse implements Streamable, ToXContent { if (suggest != null) { suggest.toXContent(builder, params); } + if (profileResults != null) { + profileResults.toXContent(builder, params); + } return builder; } @@ -114,6 +139,12 @@ public class InternalSearchResponse implements Streamable, ToXContent { timedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + profileResults = new InternalProfileShardResults(in); + } else { + profileResults = null; + } } @Override @@ -134,5 +165,14 @@ public class InternalSearchResponse implements Streamable, ToXContent { out.writeBoolean(timedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + profileResults.writeTo(out); + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 0f61b2bc6a3..4e4e9dd5dd7 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/core/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.analysis.AnalysisService; import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.cache.query.QueryCache; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; @@ -56,6 +55,7 @@ import org.elasticsearch.search.fetch.script.ScriptFieldsContext; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.SearchContextHighlight; import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.profile.Profilers; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.suggest.SuggestionSearchContext; @@ -303,6 +303,11 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple public abstract FetchSearchResult fetchResult(); + /** + * Return a handle over the profilers for the current search request, or {@code null} if profiling is not enabled. + */ + public abstract Profilers getProfilers(); + /** * Schedule the release of a resource. The time when {@link Releasable#close()} will be called on this object * is a function of the provided {@link Lifetime}.
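The readFrom()/writeTo() changes above use a version-gated presence flag so that profile results stay wire-compatible with pre-2.2.0 nodes. A self-contained sketch of that pattern using plain data streams (the boolean parameter stands in for the stream-version check; all names here are illustrative):

---------------------------------------------------------------------------
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Newer streams carry a presence flag plus an optional payload; older
// streams carry nothing, so readers must gate on the peer's version first.
public class OptionalSectionSketch {
    static void write(DataOutputStream out, boolean peerOnOrAfter220, String profile) throws IOException {
        if (peerOnOrAfter220) {            // stands in for out.getVersion().onOrAfter(Version.V_2_2_0)
            if (profile == null) {
                out.writeBoolean(false);   // section absent
            } else {
                out.writeBoolean(true);    // section present
                out.writeUTF(profile);
            }
        }
    }

    static String read(DataInputStream in, boolean peerOnOrAfter220) throws IOException {
        if (peerOnOrAfter220 && in.readBoolean()) {
            return in.readUTF();
        }
        return null;                       // older peers never sent the section
    }
}
---------------------------------------------------------------------------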
@@ -367,5 +372,4 @@ public abstract class SearchContext extends DelegatingHasContextAndHeaders imple CONTEXT } - public abstract QueryCache getQueryCache(); } diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java index 47791aeddfa..9d15dfd5790 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchLocalRequest.java @@ -71,6 +71,8 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S private Boolean requestCache; private long nowInMillis; + private boolean profile; + ShardSearchLocalRequest() { } @@ -165,6 +167,16 @@ public class ShardSearchLocalRequest extends ContextAndHeaderHolder implements S return scroll; } + @Override + public void setProfile(boolean profile) { + this.profile = profile; + } + + @Override + public boolean isProfile() { + return profile; + } + @SuppressWarnings("unchecked") protected void innerReadFrom(StreamInput in) throws IOException { index = in.readString(); diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index fb631b08270..b1730b6a14e 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -59,6 +59,17 @@ public interface ShardSearchRequest extends HasContextAndHeaders { Scroll scroll(); + /** + * Sets if this shard search needs to be profiled or not + * @param profile True if the shard should be profiled + */ + void setProfile(boolean profile); + + /** + * Returns true if this shard search is being profiled or not + */ + boolean isProfile(); + /** * Returns the cache key for this shard search request, based on its content */ diff --git a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java index 279d9d6bd20..0f9c0ced411 100644 --- a/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java +++ b/core/src/main/java/org/elasticsearch/search/internal/ShardSearchTransportRequest.java @@ -150,4 +150,14 @@ public class ShardSearchTransportRequest extends TransportRequest implements Sha public BytesReference cacheKey() throws IOException { return shardSearchLocalRequest.cacheKey(); } + + @Override + public void setProfile(boolean profile) { + shardSearchLocalRequest.setProfile(profile); + } + + @Override + public boolean isProfile() { + return shardSearchLocalRequest.isProfile(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java new file mode 100644 index 00000000000..4949c6388d2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/CollectorResult.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +/** + * Public interface and serialization container for profiled timings of the + * Collectors used in the search. Children CollectorResult's may be + * embedded inside of a parent CollectorResult + */ +public class CollectorResult implements ToXContent, Writeable { + + public static final String REASON_SEARCH_COUNT = "search_count"; + public static final String REASON_SEARCH_TOP_HITS = "search_top_hits"; + public static final String REASON_SEARCH_TERMINATE_AFTER_COUNT = "search_terminate_after_count"; + public static final String REASON_SEARCH_POST_FILTER = "search_post_filter"; + public static final String REASON_SEARCH_MIN_SCORE = "search_min_score"; + public static final String REASON_SEARCH_MULTI = "search_multi"; + public static final String REASON_SEARCH_TIMEOUT = "search_timeout"; + public static final String REASON_AGGREGATION = "aggregation"; + public static final String REASON_AGGREGATION_GLOBAL = "aggregation_global"; + + private static final ParseField NAME = new ParseField("name"); + private static final ParseField REASON = new ParseField("reason"); + private static final ParseField TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** + * The total elapsed time for this Collector + */ + private final Long time; + + /** + * A list of children collectors "embedded" inside this collector + */ + private List children; + + public CollectorResult(String collectorName, String reason, Long time, List children) { + this.collectorName = collectorName; + this.reason = reason; + this.time = time; + this.children = children; + } + + public CollectorResult(StreamInput in) throws IOException { + this.collectorName = in.readString(); + this.reason = in.readString(); + this.time = in.readLong(); + int size = in.readVInt(); + this.children = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + CollectorResult child = new CollectorResult(in); + this.children.add(child); + } + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return this.time; + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * @return a list of 
children collectors + */ + public List getProfiledChildren() { + return children; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder = builder.startObject() + .field(NAME.getPreferredName(), toString()) + .field(REASON.getPreferredName(), reason) + .field(TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double) (getTime() / 1000000.0))); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (CollectorResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + builder = builder.endObject(); + return builder; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(collectorName); + out.writeString(reason); + out.writeLong(time); + out.writeVInt(children.size()); + for (CollectorResult child : children) { + child.writeTo(out); + } + } + + @Override + public Object readFrom(StreamInput in) throws IOException { + return new CollectorResult(in); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java new file mode 100644 index 00000000000..132731f37c6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileCollector.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.LeafCollector; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * This class wraps a Lucene Collector and times the execution of: + * - setScorer() + * - collect() + * - doSetNextReader() + * - needsScores() + * + * InternalProfiler facilitates the linking of the Collector graph + */ +public class InternalProfileCollector implements Collector { + + /** + * A more friendly representation of the Collector's class name + */ + private final String collectorName; + + /** + * A "hint" to help provide some context about this Collector + */ + private final String reason; + + /** The wrapped collector */ + private final ProfileCollector collector; + + /** + * A list of "embedded" children collectors + */ + private final List<InternalProfileCollector> children; + + public InternalProfileCollector(Collector collector, String reason, List<InternalProfileCollector> children) { + this.collector = new ProfileCollector(collector); + this.reason = reason; + this.collectorName = deriveCollectorName(collector); + this.children = children; + } + + /** + * @return the profiled time for this collector (inclusive of children) + */ + public long getTime() { + return collector.getTime(); + } + + /** + * @return a human readable "hint" about what this collector was used for + */ + public String getReason() { + return this.reason; + } + + /** + * @return the lucene class name of the collector + */ + public String getName() { + return this.collectorName; + } + + /** + * Creates a human-friendly representation of the Collector name. + * + * Bucket Collectors use the aggregation name in their toString() method, + * which makes the profiled output a bit nicer.
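The enclosing-class fallback in deriveCollectorName() below exists because anonymous classes report an empty simple name. A tiny self-contained demo of that JVM behavior (names are illustrative):

---------------------------------------------------------------------------
// An anonymous class has an empty simple name, so a wrapper that labels
// collectors by class name must fall back to the enclosing class.
public class SimpleNameSketch {
    interface Collector {}

    public static void main(String[] args) {
        Collector anonymous = new Collector() {};
        System.out.println("simple:    [" + anonymous.getClass().getSimpleName() + "]");                     // []
        System.out.println("enclosing: [" + anonymous.getClass().getEnclosingClass().getSimpleName() + "]"); // [SimpleNameSketch]
    }
}
---------------------------------------------------------------------------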
+ * + * @param c The Collector to derive a name from + * @return A (hopefully) prettier name + */ + private String deriveCollectorName(Collector c) { + String s = c.getClass().getSimpleName(); + + // MultiCollector which wraps multiple BucketCollectors is generated + // via an anonymous class, so this corrects the lack of a name by + // asking the enclosingClass + if (s.equals("")) { + s = c.getClass().getEnclosingClass().getSimpleName(); + } + + // Aggregation collector toString()'s include the user-defined agg name + if (reason.equals(CollectorResult.REASON_AGGREGATION) || reason.equals(CollectorResult.REASON_AGGREGATION_GLOBAL)) { + s += ": [" + c.toString() + "]"; + } + return s; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + return collector.getLeafCollector(context); + } + + @Override + public boolean needsScores() { + return collector.needsScores(); + } + + public CollectorResult getCollectorTree() { + return InternalProfileCollector.doGetCollectorTree(this); + } + + private static CollectorResult doGetCollectorTree(InternalProfileCollector collector) { + List<CollectorResult> childResults = new ArrayList<>(collector.children.size()); + for (InternalProfileCollector child : collector.children) { + CollectorResult result = doGetCollectorTree(child); + childResults.add(result); + } + return new CollectorResult(collector.getName(), collector.getReason(), collector.getTime(), childResults); + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java new file mode 100644 index 00000000000..e6052ff5095 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileShardResults.java @@ -0,0 +1,108 @@ +package org.elasticsearch.search.profile; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; +import java.util.stream.Collectors; + +/** + * A container class to hold all the profile results across all shards.
Internally + * holds a map of shard ID -> Profiled results + */ +public final class InternalProfileShardResults implements Writeable, ToXContent{ + + private Map> shardResults; + + public InternalProfileShardResults(Map> shardResults) { + Map> transformed = + shardResults.entrySet() + .stream() + .collect(Collectors.toMap( + Map.Entry::getKey, + e -> Collections.unmodifiableList(e.getValue())) + ); + this.shardResults = Collections.unmodifiableMap(transformed); + } + + public InternalProfileShardResults(StreamInput in) throws IOException { + int size = in.readInt(); + shardResults = new HashMap<>(size); + + for (int i = 0; i < size; i++) { + String key = in.readString(); + int shardResultsSize = in.readInt(); + + List shardResult = new ArrayList<>(shardResultsSize); + + for (int j = 0; j < shardResultsSize; j++) { + ProfileShardResult result = new ProfileShardResult(in); + shardResult.add(result); + } + shardResults.put(key, shardResult); + } + } + + public Map> getShardResults() { + return this.shardResults; + } + + @Override + public InternalProfileShardResults readFrom(StreamInput in) throws IOException { + return new InternalProfileShardResults(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeInt(shardResults.size()); + for (Map.Entry> entry : shardResults.entrySet()) { + out.writeString(entry.getKey()); + out.writeInt(entry.getValue().size()); + + for (ProfileShardResult result : entry.getValue()) { + result.writeTo(out); + } + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("profile").startArray("shards"); + + for (Map.Entry> entry : shardResults.entrySet()) { + builder.startObject().field("id",entry.getKey()).startArray("searches"); + for (ProfileShardResult result : entry.getValue()) { + builder.startObject(); + result.toXContent(builder, params); + builder.endObject(); + } + builder.endArray().endObject(); + } + + builder.endArray().endObject(); + return builder; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java new file mode 100644 index 00000000000..4bc8a85a781 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/InternalProfileTree.java @@ -0,0 +1,235 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; +import java.util.concurrent.LinkedBlockingDeque; + +/** + * This class tracks the dependency tree for queries (scoring and rewriting) and + * generates {@link ProfileBreakdown} for each node in the tree. 
It also finalizes the tree + * and returns a list of {@link ProfileResult} that can be serialized back to the client + */ +final class InternalProfileTree { + + private ArrayList<ProfileBreakdown> timings; + + /** Maps the Query to its list of children. This is basically the dependency tree */ + private ArrayList<ArrayList<Integer>> tree; + + /** A list of the original queries, keyed by index position */ + private ArrayList<Query> queries; + + /** A list of top-level "roots". Each root can have its own tree of profiles */ + private ArrayList<Integer> roots; + + /** Rewrite time */ + private long rewriteTime; + private long rewriteScratch; + + /** A temporary stack used to record where we are in the dependency tree. Only used by scoring queries */ + private Deque<Integer> stack; + + private int currentToken = 0; + + public InternalProfileTree() { + timings = new ArrayList<>(10); + stack = new LinkedBlockingDeque<>(10); + tree = new ArrayList<>(10); + queries = new ArrayList<>(10); + roots = new ArrayList<>(10); + } + + /** + * Returns a {@link ProfileBreakdown} for a scoring query. Scoring queries (e.g. those + * that are past the rewrite phase and are now being wrapped by createWeight()) follow + * a recursive progression. We can track the dependency tree by a simple stack + * + * The only hiccup is that the first scoring query will be identical to the last rewritten + * query, so we need to take special care to fix that + * + * @param query The scoring query we wish to profile + * @return A ProfileBreakdown for this query + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + int token = currentToken; + + boolean stackEmpty = stack.isEmpty(); + + // If the stack is empty, we are a new root query + if (stackEmpty) { + + // We couldn't find a rewritten query to attach to, so just add it as a + // top-level root. This is just a precaution: it really shouldn't happen. + // We would only get here if a top-level query never rewrites, for some reason. + roots.add(token); + + // Increment the token since we are adding a new node, but notably, do not + // updateParent() because this was added as a root + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + updateParent(token); + + // Increment the token since we are adding a new node + currentToken += 1; + stack.add(token); + + return addDependencyNode(query, token); + } + + /** + * Begin timing a query for a specific Timing context + */ + public void startRewriteTime() { + assert rewriteScratch == 0; + rewriteScratch = System.nanoTime(); + } + + /** + * Halt the timing process and add the elapsed rewriting time. + * startRewriteTime() must be called for a particular context prior to calling + * stopAndAddRewriteTime(), otherwise the elapsed time will be negative and + * nonsensical + * + * @return The elapsed time + */ + public long stopAndAddRewriteTime() { + long time = Math.max(1, System.nanoTime() - rewriteScratch); + rewriteTime += time; + rewriteScratch = 0; + return time; + } + + /** + * Helper method to add a new node to the dependency tree.
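The token/stack bookkeeping in getQueryBreakdown() above reduces to a small pattern: the next integer token names the new node, and the tail of the deque is the current parent. A stripped-down, self-contained sketch (illustrative names; the real class also records per-token timings):

---------------------------------------------------------------------------
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Each node gets the next integer token, the deque's tail is the current
// parent, and the per-token child lists form the dependency tree.
public class TokenTreeSketch {
    private final List<List<Integer>> tree = new ArrayList<>();
    private final Deque<Integer> stack = new ArrayDeque<>();
    private int currentToken = 0;

    int enter() {                       // analogous to getQueryBreakdown()
        int token = currentToken++;
        tree.add(new ArrayList<>());
        if (!stack.isEmpty()) {
            tree.get(stack.peekLast()).add(token); // analogous to updateParent()
        }
        stack.add(token);
        return token;
    }

    void exit() {                       // analogous to pollLast()
        stack.pollLast();
    }

    List<Integer> childrenOf(int token) {
        return tree.get(token);
    }
}
---------------------------------------------------------------------------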
+ * + * Initializes a new list in the dependency tree, saves the query and + * generates a new {@link ProfileBreakdown} to track the timings + * of this query + * + * @param query The query to profile + * @param token The assigned token for this query + * @return A ProfileBreakdown to profile this query + */ + private ProfileBreakdown addDependencyNode(Query query, int token) { + + // Add a new slot in the dependency tree + tree.add(new ArrayList<>(5)); + + // Save our query for lookup later + queries.add(query); + + ProfileBreakdown queryTimings = new ProfileBreakdown(); + timings.add(token, queryTimings); + return queryTimings; + } + + /** + * Removes the last (e.g. most recent) value on the stack + */ + public void pollLast() { + stack.pollLast(); + } + + /** + * After the query has been run and profiled, we need to merge the flat timing map + * with the dependency graph to build a data structure that mirrors the original + * query tree + * + * @return a hierarchical representation of the profiled query tree + */ + public List getQueryTree() { + ArrayList results = new ArrayList<>(5); + for (Integer root : roots) { + results.add(doGetQueryTree(root)); + } + return results; + } + + /** + * Recursive helper to finalize a node in the dependency tree + * @param token The node we are currently finalizing + * @return A hierarchical representation of the tree inclusive of children at this level + */ + private ProfileResult doGetQueryTree(int token) { + Query query = queries.get(token); + ProfileBreakdown breakdown = timings.get(token); + Map timings = breakdown.toTimingMap(); + List children = tree.get(token); + List childrenProfileResults = Collections.emptyList(); + + if (children != null) { + childrenProfileResults = new ArrayList<>(children.size()); + for (Integer child : children) { + ProfileResult childNode = doGetQueryTree(child); + childrenProfileResults.add(childNode); + } + } + + // TODO this would be better done bottom-up instead of top-down to avoid + // calculating the same times over and over...but worth the effort? 
+ long nodeTime = getNodeTime(timings, childrenProfileResults); + String queryDescription = query.getClass().getSimpleName(); + String luceneName = query.toString(); + return new ProfileResult(queryDescription, luceneName, timings, childrenProfileResults, nodeTime); + } + + public long getRewriteTime() { + return rewriteTime; + } + + /** + * Internal helper to add a child to the current parent node + * + * @param childToken The child to add to the current parent + */ + private void updateParent(int childToken) { + Integer parent = stack.peekLast(); + ArrayList<Integer> parentNode = tree.get(parent); + parentNode.add(childToken); + tree.set(parent, parentNode); + } + + /** + * Internal helper to calculate the time of a node, inclusive of children + * + * @param timings A map of breakdown timings for the node + * @param children All children profile results at this node + * @return The total time at this node, inclusive of children + */ + private static long getNodeTime(Map<String, Long> timings, List<ProfileResult> children) { + long nodeTime = 0; + for (long time : timings.values()) { + nodeTime += time; + } + + // Then add up our children + for (ProfileResult child : children) { + nodeTime += getNodeTime(child.getTimeBreakdown(), child.getProfiledChildren()); + } + return nodeTime; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java new file mode 100644 index 00000000000..55ad77b6937 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileBreakdown.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +/** + * A record of timings for the various operations that may happen during query execution. + * A node's time may be composed of several internal attributes (rewriting, weighting, + * scoring, etc). + */ +public final class ProfileBreakdown { + + /** Enumeration of all supported timing types. */ + public enum TimingType { + CREATE_WEIGHT, + BUILD_SCORER, + NEXT_DOC, + ADVANCE, + MATCH, + SCORE; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + /** + * The accumulated timings for this query node + */ + private final long[] timings; + + /** Scratch to store the current timing type. */ + private TimingType currentTimingType; + + /** + * The temporary scratch space for holding start-times + */ + private long scratch; + + /** Sole constructor.
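+ * Allocates one accumulator slot per {@link TimingType}.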
*/ + public ProfileBreakdown() { + timings = new long[TimingType.values().length]; + } + + /** + * Begin timing a query for a specific Timing context + * @param timing The timing context being profiled + */ + public void startTime(TimingType timing) { + assert currentTimingType == null; + assert scratch == 0; + currentTimingType = timing; + scratch = System.nanoTime(); + } + + /** + * Halt the timing process and save the elapsed time. + * startTime() must be called for a particular context prior to calling + * stopAndRecordTime(), otherwise the elapsed time will be negative and + * nonsensical. + * + * @return The elapsed time + */ + public long stopAndRecordTime() { + long time = Math.max(1, System.nanoTime() - scratch); + timings[currentTimingType.ordinal()] += time; + currentTimingType = null; + scratch = 0L; + return time; + } + + /** Convert this record to a map from {@link TimingType} to times. */ + public Map<String, Long> toTimingMap() { + Map<String, Long> map = new HashMap<>(); + for (TimingType timingType : TimingType.values()) { + map.put(timingType.toString(), timings[timingType.ordinal()]); + } + return Collections.unmodifiableMap(map); + } + + /** + * Add other's timings into this breakdown + * @param other Another Breakdown to merge with this one + */ + public void merge(ProfileBreakdown other) { + assert(timings.length == other.timings.length); + for (int i = 0; i < timings.length; ++i) { + timings[i] += other.timings[i]; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java new file mode 100644 index 00000000000..7d7538c9117 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileCollector.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.FilterCollector; +import org.apache.lucene.search.FilterLeafCollector; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Scorer; + +import java.io.IOException; + +/** A collector that profiles how much time is spent calling it. */ +final class ProfileCollector extends FilterCollector { + + private long time; + + /** Sole constructor. */ + public ProfileCollector(Collector in) { + super(in); + } + + /** Return the wrapped collector.
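+ * This is the collector whose calls are being timed.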
*/ + public Collector getDelegate() { + return in; + } + + @Override + public boolean needsScores() { + final long start = System.nanoTime(); + try { + return super.needsScores(); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final long start = System.nanoTime(); + final LeafCollector inLeafCollector; + try { + inLeafCollector = super.getLeafCollector(context); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + return new FilterLeafCollector(inLeafCollector) { + + @Override + public void collect(int doc) throws IOException { + final long start = System.nanoTime(); + try { + super.collect(doc); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + + @Override + public void setScorer(Scorer scorer) throws IOException { + final long start = System.nanoTime(); + try { + super.setScorer(scorer); + } finally { + time += Math.max(1, System.nanoTime() - start); + } + } + }; + } + + /** Return the total time spent on this collector. */ + public long getTime() { + return time; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java new file mode 100644 index 00000000000..4c8752fdaf2 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileResult.java @@ -0,0 +1,165 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; + +/** + * This class is the internal representation of a profiled Query, corresponding + * to a single node in the query tree. 
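+ * A node renders to JSON roughly as follows (values illustrative; the
+ * "children" array is only emitted when the node has children):
+ * <pre>{@code
+ * {
+ *   "query_type": "TermQuery",
+ *   "lucene": "body:foo",
+ *   "time": "55.20000000ms",
+ *   "breakdown": {"create_weight": 101239, "build_scorer": 612, "next_doc": 583, "advance": 0, "match": 0, "score": 227}
+ * }
+ * }</pre>
+ *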
It is built after the query has finished executing + * and is merely a structured representation, rather than the entity that collects the timing + * profile (see {@link Profiler} for that) + * + * Each ProfileResult has a List of ProfileResults, which will contain + * "children" queries if applicable + */ +final class ProfileResult implements Writeable<ProfileResult>, ToXContent { + + private static final ParseField QUERY_TYPE = new ParseField("query_type"); + private static final ParseField LUCENE_DESCRIPTION = new ParseField("lucene"); + private static final ParseField NODE_TIME = new ParseField("time"); + private static final ParseField CHILDREN = new ParseField("children"); + private static final ParseField BREAKDOWN = new ParseField("breakdown"); + + private final String queryType; + private final String luceneDescription; + private final Map<String, Long> timings; + private final long nodeTime; + private final List<ProfileResult> children; + + public ProfileResult(String queryType, String luceneDescription, Map<String, Long> timings, List<ProfileResult> children, long nodeTime) { + this.queryType = queryType; + this.luceneDescription = luceneDescription; + this.timings = timings; + this.children = children; + this.nodeTime = nodeTime; + } + + public ProfileResult(StreamInput in) throws IOException { + this.queryType = in.readString(); + this.luceneDescription = in.readString(); + this.nodeTime = in.readLong(); + + int timingsSize = in.readVInt(); + this.timings = new HashMap<>(timingsSize); + for (int i = 0; i < timingsSize; ++i) { + timings.put(in.readString(), in.readLong()); + } + + int size = in.readVInt(); + this.children = new ArrayList<>(size); + + for (int i = 0; i < size; i++) { + children.add(new ProfileResult(in)); + } + } + + /** + * Retrieve the lucene description of this query (e.g. the "explain" text) + */ + public String getLuceneDescription() { + return luceneDescription; + } + + /** + * Retrieve the name of the query (e.g. "TermQuery") + */ + public String getQueryName() { + return queryType; + } + + /** + * Returns the timing breakdown for this particular query node + */ + public Map<String, Long> getTimeBreakdown() { + return Collections.unmodifiableMap(timings); + } + + /** + * Returns the total time (inclusive of children) for this query node.
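+ * The value is this node's own breakdown total plus, recursively, the node times of all of its children, as computed by InternalProfileTree#getNodeTime.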
+ * + * @return elapsed time in nanoseconds + */ + public long getTime() { + return nodeTime; + } + + /** + * Returns a list of all profiled children queries + */ + public List<ProfileResult> getProfiledChildren() { + return Collections.unmodifiableList(children); + } + + @Override + public ProfileResult readFrom(StreamInput in) throws IOException { + return new ProfileResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(queryType); + out.writeString(luceneDescription); + out.writeLong(nodeTime); // not VLong because it can be negative + out.writeVInt(timings.size()); + for (Map.Entry<String, Long> entry : timings.entrySet()) { + out.writeString(entry.getKey()); + out.writeLong(entry.getValue()); + } + out.writeVInt(children.size()); + for (ProfileResult child : children) { + child.writeTo(out); + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder = builder.startObject() + .field(QUERY_TYPE.getPreferredName(), queryType) + .field(LUCENE_DESCRIPTION.getPreferredName(), luceneDescription) + .field(NODE_TIME.getPreferredName(), String.format(Locale.US, "%.10gms", (double)(getTime() / 1000000.0))) + .field(BREAKDOWN.getPreferredName(), timings); + + if (!children.isEmpty()) { + builder = builder.startArray(CHILDREN.getPreferredName()); + for (ProfileResult child : children) { + builder = child.toXContent(builder, params); + } + builder = builder.endArray(); + } + + builder = builder.endObject(); + return builder; + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java new file mode 100644 index 00000000000..b0dc6f2cd4e --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileScorer.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Collection; + +/** + * {@link Scorer} wrapper that will compute how much time is spent on moving + * the iterator, confirming matches and computing scores.
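+ * A minimal sketch of how the wrapper is put together (mirroring ProfileWeight#scorer in this change):
+ * <pre>{@code
+ * // inside ProfileWeight#scorer(LeafReaderContext):
+ * Scorer sub = subQueryWeight.scorer(context);   // timed as BUILD_SCORER by the caller
+ * return new ProfileScorer(this, sub, profile);  // times NEXT_DOC/ADVANCE/SCORE/MATCH per call
+ * }</pre>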
*/ +final class ProfileScorer extends Scorer { + + private final Scorer scorer; + private ProfileWeight profileWeight; + private final ProfileBreakdown profile; + + ProfileScorer(ProfileWeight w, Scorer scorer, ProfileBreakdown profile) throws IOException { + super(w); + this.scorer = scorer; + this.profileWeight = w; + this.profile = profile; + } + + @Override + public int docID() { + return scorer.docID(); + } + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return scorer.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + try { + return scorer.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float score() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.SCORE); + try { + return scorer.score(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int freq() throws IOException { + return scorer.freq(); + } + + @Override + public long cost() { + return scorer.cost(); + } + + @Override + public Weight getWeight() { + return profileWeight; + } + + @Override + public Collection<ChildScorer> getChildren() { + return scorer.getChildren(); + } + + @Override + public TwoPhaseIterator asTwoPhaseIterator() { + final TwoPhaseIterator in = scorer.asTwoPhaseIterator(); + if (in == null) { + return null; + } + final DocIdSetIterator inApproximation = in.approximation(); + final DocIdSetIterator approximation = new DocIdSetIterator() { + + @Override + public int advance(int target) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.ADVANCE); + try { + return inApproximation.advance(target); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int nextDoc() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.NEXT_DOC); + try { + return inApproximation.nextDoc(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public int docID() { + return inApproximation.docID(); + } + + @Override + public long cost() { + return inApproximation.cost(); + } + }; + return new TwoPhaseIterator(approximation) { + @Override + public boolean matches() throws IOException { + profile.startTime(ProfileBreakdown.TimingType.MATCH); + try { + return in.matches(); + } finally { + profile.stopAndRecordTime(); + } + } + + @Override + public float matchCost() { + return in.matchCost(); + } + }; + } +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java new file mode 100644 index 00000000000..6e005babb3c --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileShardResult.java @@ -0,0 +1,103 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.*; + +/** + * A container class to hold the profile results for a single shard in the request. + * Contains a list of query profiles, a collector tree and the total rewrite time. + */ +public final class ProfileShardResult implements Writeable<ProfileShardResult>, ToXContent { + + private final List<ProfileResult> profileResults; + + private final CollectorResult profileCollector; + + private final long rewriteTime; + + public ProfileShardResult(List<ProfileResult> profileResults, long rewriteTime, + CollectorResult profileCollector) { + assert(profileCollector != null); + this.profileResults = profileResults; + this.profileCollector = profileCollector; + this.rewriteTime = rewriteTime; + } + + public ProfileShardResult(StreamInput in) throws IOException { + int profileSize = in.readVInt(); + profileResults = new ArrayList<>(profileSize); + for (int j = 0; j < profileSize; j++) { + profileResults.add(new ProfileResult(in)); + } + + profileCollector = new CollectorResult(in); + rewriteTime = in.readLong(); + } + + public List<ProfileResult> getQueryResults() { + return Collections.unmodifiableList(profileResults); + } + + public long getRewriteTime() { + return rewriteTime; + } + + public CollectorResult getCollectorResult() { + return profileCollector; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startArray("query"); + for (ProfileResult p : profileResults) { + p.toXContent(builder, params); + } + builder.endArray(); + builder.field("rewrite_time", rewriteTime); + builder.startArray("collector"); + profileCollector.toXContent(builder, params); + builder.endArray(); + return builder; + } + + @Override + public ProfileShardResult readFrom(StreamInput in) throws IOException { + return new ProfileShardResult(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(profileResults.size()); + for (ProfileResult p : profileResults) { + p.writeTo(out); + } + profileCollector.writeTo(out); + out.writeLong(rewriteTime); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java new file mode 100644 index 00000000000..1ce5cd721fe --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/ProfileWeight.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BulkScorer; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Set; + +/** + * Weight wrapper that will compute how much time it takes to build the + * {@link Scorer} and then return a {@link Scorer} that is wrapped in + * order to compute timings as well. + */ +public final class ProfileWeight extends Weight { + + private final Weight subQueryWeight; + private final ProfileBreakdown profile; + + public ProfileWeight(Query query, Weight subQueryWeight, ProfileBreakdown profile) throws IOException { + super(query); + this.subQueryWeight = subQueryWeight; + this.profile = profile; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + profile.startTime(ProfileBreakdown.TimingType.BUILD_SCORER); + final Scorer subQueryScorer; + try { + subQueryScorer = subQueryWeight.scorer(context); + } finally { + profile.stopAndRecordTime(); + } + if (subQueryScorer == null) { + return null; + } + + return new ProfileScorer(this, subQueryScorer, profile); + } + + @Override + public BulkScorer bulkScorer(LeafReaderContext context) throws IOException { + // We use the default bulk scorer instead of the specialized one. The reason + // is that Lucene's BulkScorers do everything at once: finding matches, + // scoring them and calling the collector, so they make it impossible to + // see where time is spent, which is the purpose of query profiling. + // The default bulk scorer will pull a scorer and iterate over matches, + // this might be a significantly different execution path for some queries + // like disjunctions, but in general this is what is done anyway + return super.bulkScorer(context); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + return subQueryWeight.explain(context, doc); + } + + @Override + public float getValueForNormalization() throws IOException { + return subQueryWeight.getValueForNormalization(); + } + + @Override + public void normalize(float norm, float topLevelBoost) { + subQueryWeight.normalize(norm, topLevelBoost); + } + + @Override + public void extractTerms(Set<Term> set) { + subQueryWeight.extractTerms(set); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profiler.java b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java new file mode 100644 index 00000000000..bf0c9ec01b6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profiler.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.apache.lucene.search.Query; + +import java.util.*; + +/** + * This class acts as a thread-local storage for profiling a query. It also + * builds a representation of the query tree which is constructed + * "online" as the weights are wrapped by ContextIndexSearcher. This allows us + * to know the relationship between nodes in the tree without explicitly + * walking the tree or pre-wrapping everything. + * + * A Profiler is associated with every Search, not per Search-Request. E.g. a + * request may execute two searches (query + global agg). A Profiler just + * represents one of those. + */ +public final class Profiler { + + private final InternalProfileTree queryTree = new InternalProfileTree(); + + /** + * The root Collector used in the search + */ + private InternalProfileCollector collector; + + public Profiler() {} + + /** Set the collector that is associated with this profiler. */ + public void setCollector(InternalProfileCollector collector) { + if (this.collector != null) { + throw new IllegalStateException("The collector can only be set once."); + } + this.collector = Objects.requireNonNull(collector); + } + + /** + * Get the {@link ProfileBreakdown} for the given query, potentially creating it if it did not exist. + * This should only be used for queries that will be undergoing scoring. Do not use it to profile the + * rewriting phase + */ + public ProfileBreakdown getQueryBreakdown(Query query) { + return queryTree.getQueryBreakdown(query); + } + + /** + * Begin timing the rewrite phase of a request. All rewrites are accumulated together into a + * single metric + */ + public void startRewriteTime() { + queryTree.startRewriteTime(); + } + + /** + * Stop recording the current rewrite and add its time to the total tally, returning the + * elapsed time of this rewrite. + * + * @return elapsed rewrite time + */ + public long stopAndAddRewriteTime() { + return queryTree.stopAndAddRewriteTime(); + } + + /** + * Removes the last (i.e. most recent) query on the stack. This should only be called for scoring + * queries, not rewritten queries + */ + public void pollLastQuery() { + queryTree.pollLast(); + } + + /** + * @return a hierarchical representation of the profiled query tree + */ + public List<ProfileResult> getQueryTree() { + return queryTree.getQueryTree(); + } + + /** + * @return total time taken to rewrite all queries in this profile + */ + public long getRewriteTime() { + return queryTree.getRewriteTime(); + } + + /** + * Return the current root Collector for this search + */ + public CollectorResult getCollector() { + return collector.getCollectorTree(); + } + + /** + * Helper method to convert Profiler into ProfileShardResults, which can be + * serialized to other nodes, emitted as JSON, etc.
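+ * A typical call site (as used by QueryPhase in this change):
+ * <pre>{@code
+ * List<ProfileShardResult> shardResults =
+ *     Profiler.buildShardResults(searchContext.getProfilers().getProfilers());
+ * searchContext.queryResult().profileResults(shardResults);
+ * }</pre>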
+ * + * @param profilers A list of Profilers to convert into ProfileShardResults + * @return A list of corresponding ProfileShardResults + */ + public static List<ProfileShardResult> buildShardResults(List<Profiler> profilers) { + List<ProfileShardResult> results = new ArrayList<>(profilers.size()); + for (Profiler profiler : profilers) { + ProfileShardResult result = new ProfileShardResult( + profiler.getQueryTree(), profiler.getRewriteTime(), profiler.getCollector()); + results.add(result); + } + return results; + } + + +} diff --git a/core/src/main/java/org/elasticsearch/search/profile/Profilers.java b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java new file mode 100644 index 00000000000..0fb7d9ac1c9 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/search/profile/Profilers.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.profile; + +import org.elasticsearch.search.internal.ContextIndexSearcher; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** Wrapper around several {@link Profiler}s that makes management easier. */ +public final class Profilers { + + private final ContextIndexSearcher searcher; + private final List<Profiler> profilers; + + /** Sole constructor. This {@link Profilers} instance will initially wrap one {@link Profiler}. */ + public Profilers(ContextIndexSearcher searcher) { + this.searcher = searcher; + this.profilers = new ArrayList<>(); + addProfiler(); + } + + /** Switch to a new profile. */ + public Profiler addProfiler() { + Profiler profiler = new Profiler(); + searcher.setProfiler(profiler); + profilers.add(profiler); + return profiler; + } + + /** Get the current profiler. */ + public Profiler getCurrent() { + return profilers.get(profilers.size() - 1); + } + + /** Return the list of all created {@link Profiler}s so far.
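+ * There is one {@link Profiler} per search executed by the request; e.g. a query and a global aggregation each get their own.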
*/ + public List<Profiler> getProfilers() { + return Collections.unmodifiableList(profilers); + } + +} diff --git a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java index ce8836cd336..08ff849871f 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java +++ b/core/src/main/java/org/elasticsearch/search/query/QueryPhase.java @@ -52,13 +52,16 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationPhase; import org.elasticsearch.search.internal.ScrollContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.profile.*; import org.elasticsearch.search.rescore.RescorePhase; import org.elasticsearch.search.rescore.RescoreSearchContext; import org.elasticsearch.search.sort.SortParseElement; import org.elasticsearch.search.sort.TrackScoresParseElement; import org.elasticsearch.search.suggest.SuggestPhase; +import java.util.AbstractList; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -124,6 +127,11 @@ public class QueryPhase implements SearchPhase { } suggestPhase.execute(searchContext); aggregationPhase.execute(searchContext); + + if (searchContext.getProfilers() != null) { + List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } } private static boolean returnsDocsInOrder(Query query, Sort sort) { @@ -147,6 +155,7 @@ public class QueryPhase implements SearchPhase { QuerySearchResult queryResult = searchContext.queryResult(); queryResult.searchTimedOut(false); + final boolean doProfile = searchContext.getProfilers() != null; final SearchType searchType = searchContext.searchType(); boolean rescore = false; try { @@ -162,9 +171,13 @@ public class QueryPhase implements SearchPhase { Callable<TopDocs> topDocsCallable; assert query == searcher.rewrite(query); // already rewritten + if (searchContext.size() == 0) { // no matter what the value of from is final TotalHitCountCollector totalHitCountCollector = new TotalHitCountCollector(); collector = totalHitCountCollector; + if (searchContext.getProfilers() != null) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_COUNT, Collections.emptyList()); + } topDocsCallable = new Callable<TopDocs>() { @Override public TopDocs call() throws Exception { @@ -219,6 +232,9 @@ public class QueryPhase implements SearchPhase { topDocsCollector = TopScoreDocCollector.create(numDocs, lastEmittedDoc); } collector = topDocsCollector; + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TOP_HITS, Collections.emptyList()); + } topDocsCallable = new Callable<TopDocs>() { @Override public TopDocs call() throws Exception { @@ -254,27 +270,57 @@ public class QueryPhase implements SearchPhase { final boolean terminateAfterSet = searchContext.terminateAfter() != SearchContext.DEFAULT_TERMINATE_AFTER; if (terminateAfterSet) { + final Collector child = collector; // throws Lucene.EarlyTerminationException when given count is reached collector = Lucene.wrapCountBasedEarlyTerminatingCollector(collector, searchContext.terminateAfter()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TERMINATE_AFTER_COUNT, + Collections.singletonList((InternalProfileCollector) child)); + } } if 
(searchContext.parsedPostFilter() != null) { + final Collector child = collector; // this will only get applied to the actual search collector and not // to any scoped collectors, also, it will only be applied to the main collector // since that is where the filter should only work final Weight filterWeight = searcher.createNormalizedWeight(searchContext.parsedPostFilter().query(), false); collector = new FilteredCollector(collector, filterWeight); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_POST_FILTER, + Collections.singletonList((InternalProfileCollector) child)); + } } // plug in additional collectors, like aggregations - List<Collector> allCollectors = new ArrayList<>(); - allCollectors.add(collector); - allCollectors.addAll(searchContext.queryCollectors().values()); - collector = MultiCollector.wrap(allCollectors); + final List<Collector> subCollectors = new ArrayList<>(); + subCollectors.add(collector); + subCollectors.addAll(searchContext.queryCollectors().values()); + collector = MultiCollector.wrap(subCollectors); + if (doProfile && collector instanceof InternalProfileCollector == false) { + // When there is a single collector to wrap, MultiCollector returns it + // directly, so only wrap in the case that there are several sub collectors + final List<InternalProfileCollector> children = new AbstractList<InternalProfileCollector>() { + @Override + public InternalProfileCollector get(int index) { + return (InternalProfileCollector) subCollectors.get(index); + } + @Override + public int size() { + return subCollectors.size(); + } + }; + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MULTI, children); + } // apply the minimum score after multi collector so we filter aggs as well if (searchContext.minimumScore() != null) { + final Collector child = collector; collector = new MinimumScoreCollector(collector, searchContext.minimumScore()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_MIN_SCORE, + Collections.singletonList((InternalProfileCollector) child)); + } } if (collector.getClass() == TotalHitCountCollector.class) { @@ -319,13 +365,21 @@ public class QueryPhase implements SearchPhase { final boolean timeoutSet = searchContext.timeoutInMillis() != SearchService.NO_TIMEOUT.millis(); if (timeoutSet && collector != null) { // collector might be null if no collection is actually needed + final Collector child = collector; // TODO: change to use our own counter that uses the scheduler in ThreadPool // throws TimeLimitingCollector.TimeExceededException when timeout has reached collector = Lucene.wrapTimeLimitingCollector(collector, searchContext.timeEstimateCounter(), searchContext.timeoutInMillis()); + if (doProfile) { + collector = new InternalProfileCollector(collector, CollectorResult.REASON_SEARCH_TIMEOUT, + Collections.singletonList((InternalProfileCollector) child)); + } } try { if (collector != null) { + if (doProfile) { + searchContext.getProfilers().getCurrent().setCollector((InternalProfileCollector) collector); + } searcher.search(query, collector); } } catch (TimeLimitingCollector.TimeExceededException e) { @@ -343,7 +397,13 @@ public class QueryPhase implements SearchPhase { queryResult.topDocs(topDocsCallable.call()); + if (searchContext.getProfilers() != null) { + List<ProfileShardResult> shardResults = Profiler.buildShardResults(searchContext.getProfilers().getProfilers()); + searchContext.queryResult().profileResults(shardResults); + } + return rescore; + } catch (Throwable e) { throw new 
QueryPhaseExecutionException(searchContext, "Failed to execute main query", e); } diff --git a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 7f8d12a9c90..9223eb5a82d 100644 --- a/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/core/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -20,6 +20,8 @@ package org.elasticsearch.search.query; import org.apache.lucene.search.TopDocs; +import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -29,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.profile.ProfileShardResult; import org.elasticsearch.search.suggest.Suggest; import java.io.IOException; @@ -53,6 +56,7 @@ public class QuerySearchResult extends QuerySearchResultProvider { private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; + private List<ProfileShardResult> profileShardResults; public QuerySearchResult() { @@ -120,6 +124,22 @@ public class QuerySearchResult extends QuerySearchResultProvider { this.aggregations = aggregations; } + /** + * Returns the profiled results for this search, or potentially null if the result was empty + * @return The profiled results, or null + */ + public @Nullable List<ProfileShardResult> profileResults() { + return profileShardResults; + } + + /** + * Sets the finalized profiling results for this query + * @param shardResults The finalized profile + */ + public void profileResults(List<ProfileShardResult> shardResults) { + this.profileShardResults = shardResults; + } + public List<SiblingPipelineAggregator> pipelineAggregators() { return pipelineAggregators; } @@ -191,6 +211,15 @@ public class QuerySearchResult extends QuerySearchResultProvider { } searchTimedOut = in.readBoolean(); terminatedEarly = in.readOptionalBoolean(); + + if (in.getVersion().onOrAfter(Version.V_2_2_0) && in.readBoolean()) { + int profileSize = in.readVInt(); + profileShardResults = new ArrayList<>(profileSize); + for (int i = 0; i < profileSize; i++) { + ProfileShardResult result = new ProfileShardResult(in); + profileShardResults.add(result); + } + } } @Override @@ -229,5 +258,17 @@ public class QuerySearchResult extends QuerySearchResultProvider { } out.writeBoolean(searchTimedOut); out.writeOptionalBoolean(terminatedEarly); + + if (out.getVersion().onOrAfter(Version.V_2_2_0)) { + if (profileShardResults == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(profileShardResults.size()); + for (ProfileShardResult shardResult : profileShardResults) { + shardResult.writeTo(out); + } + } + } } } diff --git a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java index c465eaf6efb..e4fe2c08f75 100644 --- a/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java +++ b/core/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -130,7 +131,7 @@ public class ScriptSortParser implements SortParser { if (type == null) { throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation()); } - final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH); + final SearchScript searchScript = context.scriptService().search(context.lookup(), script, ScriptContext.Standard.SEARCH, Collections.emptyMap()); if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) { throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation()); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java index 106672ae7ae..6a0155ffb7a 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggester.java @@ -30,7 +30,7 @@ import org.apache.lucene.search.suggest.document.TopSuggestDocs; import org.apache.lucene.search.suggest.document.TopSuggestDocsCollector; import org.apache.lucene.util.*; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.index.fielddata.AtomicFieldData; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.MappedFieldType; @@ -57,7 +57,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext> } CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize()); spare.copyUTF8Bytes(suggestionContext.getText()); - CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new StringText(spare.toString()), 0, spare.length()); + CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(new Text(spare.toString()), 0, spare.length()); completionSuggestion.addTerm(completionSuggestEntry); TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize()); suggest(searcher, suggestionContext.toQuery(), collector); @@ -91,7 +91,7 @@ public class CompletionSuggester extends Suggester<CompletionSuggestionContext> } if (numResult++ < suggestionContext.getSize()) { CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - new StringText(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); + new Text(suggestDoc.key.toString()), suggestDoc.score, contexts, payload); completionSuggestEntry.addOption(option); } else { break; diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java index 4bbdaf9c49e..9b083a91788 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext; import org.elasticsearch.search.suggest.phrase.PhraseSuggestionContext.DirectCandidateGenerator; import java.io.IOException; +import java.util.Collections; public final class PhraseSuggestParser 
implements SuggestContextParser { @@ -143,7 +144,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { } Template template = Template.parse(parser, parseFieldMatcher); CompiledScript compiledScript = suggester.scriptService().compile(template, ScriptContext.Standard.SEARCH, - headersContext); + headersContext, Collections.emptyMap()); suggestion.setCollateQueryScript(compiledScript); } else if ("params".equals(fieldName)) { suggestion.setCollateScriptParams(parser.map()); diff --git a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java index fccf9ebc30e..c7fa6fae302 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggester.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.query.ParsedQuery; @@ -127,11 +126,11 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> { if (!collateMatch && !collatePrune) { continue; } - Text phrase = new StringText(spare.toString()); + Text phrase = new Text(spare.toString()); Text highlighted = null; if (suggestion.getPreTag() != null) { spare.copyUTF8Bytes(correction.join(SEPARATOR, byteSpare, suggestion.getPreTag(), suggestion.getPostTag())); - highlighted = new StringText(spare.toString()); + highlighted = new Text(spare.toString()); } if (collatePrune) { resultEntry.addOption(new Suggestion.Entry.Option(phrase, highlighted, (float) (correction.score), collateMatch)); @@ -147,7 +146,7 @@ public final class PhraseSuggester extends Suggester<PhraseSuggestionContext> { private PhraseSuggestion.Entry buildResultEntry(PhraseSuggestionContext suggestion, CharsRefBuilder spare, double cutoffScore) { spare.copyUTF8Bytes(suggestion.getText()); - return new PhraseSuggestion.Entry(new StringText(spare.toString()), 0, spare.length(), cutoffScore); + return new PhraseSuggestion.Entry(new Text(spare.toString()), 0, spare.length(), cutoffScore); } ScriptService scriptService() { diff --git a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java index 4c1b176c990..34cd3ad4d56 100644 --- a/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java +++ b/core/src/main/java/org/elasticsearch/search/suggest/term/TermSuggester.java @@ -27,8 +27,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.text.BytesText; -import org.elasticsearch.common.text.StringText; import org.elasticsearch.common.text.Text; import org.elasticsearch.search.suggest.SuggestContextParser; import org.elasticsearch.search.suggest.SuggestUtils; @@ -54,10 +52,10 @@ public final class TermSuggester extends Suggester<TermSuggestionContext> { SuggestWord[] suggestedWords = directSpellChecker.suggestSimilar( token.term, suggestion.getShardSize(), indexReader, suggestion.getDirectSpellCheckerSettings().suggestMode() ); - Text key = new BytesText(new BytesArray(token.term.bytes())); + Text key = new 
Text(new BytesArray(token.term.bytes())); TermSuggestion.Entry resultEntry = new TermSuggestion.Entry(key, token.startOffset, token.endOffset - token.startOffset); for (SuggestWord suggestWord : suggestedWords) { - Text word = new StringText(suggestWord.string); + Text word = new Text(suggestWord.string); resultEntry.addOption(new TermSuggestion.Entry.Option(word, suggestWord.freq, suggestWord.score)); } response.addTerm(resultEntry); diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java index cd710d52cdc..14b2680d25c 100644 --- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -33,8 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; -import org.elasticsearch.cluster.settings.DynamicSettings; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -118,18 +117,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private final MetaDataCreateIndexService createIndexService; - private final DynamicSettings dynamicSettings; + private final ClusterSettings dynamicSettings; private final MetaDataIndexUpgradeService metaDataIndexUpgradeService; private final CopyOnWriteArrayList<ActionListener<RestoreCompletionResponse>> listeners = new CopyOnWriteArrayList<>(); private final BlockingQueue<UpdateIndexShardRestoreStatusRequest> updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue(); + private final ClusterSettings clusterSettings; @Inject public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService, - AllocationService allocationService, MetaDataCreateIndexService createIndexService, @ClusterDynamicSettings DynamicSettings dynamicSettings, - MetaDataIndexUpgradeService metaDataIndexUpgradeService) { + AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings, + MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) { super(settings); this.clusterService = clusterService; this.repositoriesService = repositoriesService; @@ -140,6 +140,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.metaDataIndexUpgradeService = metaDataIndexUpgradeService; transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); + this.clusterSettings = clusterSettings; } /** @@ -389,24 +390,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) { if (request.includeGlobalState()) { if (metaData.persistentSettings() != null) { - boolean changed = false; - Settings.Builder persistentSettings = Settings.settingsBuilder().put(); - for (Map.Entry<String, String> entry : metaData.persistentSettings().getAsMap().entrySet()) { - if (dynamicSettings.isDynamicOrLoggingSetting(entry.getKey())) { - String error = 
dynamicSettings.validateDynamicSetting(entry.getKey(), entry.getValue(), clusterService.state()); - if (error == null) { - persistentSettings.put(entry.getKey(), entry.getValue()); - changed = true; - } else { - logger.warn("ignoring persistent setting [{}], [{}]", entry.getKey(), error); - } - } else { - logger.warn("ignoring persistent setting [{}], not dynamically updateable", entry.getKey()); - } - } - if (changed) { - mdBuilder.persistentSettings(persistentSettings.build()); - } + Settings settings = metaData.persistentSettings(); + clusterSettings.dryRun(settings); + mdBuilder.persistentSettings(settings); } if (metaData.templates() != null) { // TODO: Should all existing templates be deleted first? diff --git a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b0d81279b03..56e02926ed6 100644 --- a/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/core/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -20,13 +20,13 @@ package org.elasticsearch.threadpool; import org.apache.lucene.util.Counter; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; import org.elasticsearch.common.unit.SizeValue; @@ -38,14 +38,11 @@ import org.elasticsearch.common.util.concurrent.XRejectedExecutionHandler; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.node.settings.NodeSettingsService; import java.io.IOException; import java.util.*; import java.util.concurrent.*; -import java.util.function.Function; -import java.util.regex.Matcher; -import java.util.regex.Pattern; +import java.util.concurrent.atomic.AtomicBoolean; import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.settings.Settings.settingsBuilder; @@ -172,7 +169,7 @@ public class ThreadPool extends AbstractComponent { } } - public static final String THREADPOOL_GROUP = "threadpool."; + public static final Setting<Settings> THREADPOOL_GROUP_SETTING = Setting.groupSetting("threadpool.", true, Setting.Scope.CLUSTER); private volatile Map<String, ExecutorHolder> executors; @@ -184,7 +181,7 @@ public class ThreadPool extends AbstractComponent { private final EstimatedTimeThread estimatedTimeThread; - private boolean settingsListenerIsSet = false; + private final AtomicBoolean settingsListenerIsSet = new AtomicBoolean(false); static final Executor DIRECT_EXECUTOR = command -> command.run(); @@ -197,7 +194,8 @@ public class ThreadPool extends AbstractComponent { assert settings.get("name") != null : "ThreadPool's settings should contain a name"; - Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings); + Map<String, Settings> groupSettings = THREADPOOL_GROUP_SETTING.get(settings).getAsGroups(); + validate(groupSettings); int availableProcessors = EsExecutors.boundedNumberOfProcessors(settings); int halfProcMaxAt5 = Math.min(((availableProcessors + 1) / 2), 5); @@ -252,18 
+250,12 @@ public class ThreadPool extends AbstractComponent { this.estimatedTimeThread.start(); } - private Map<String, Settings> getThreadPoolSettingsGroup(Settings settings) { - Map<String, Settings> groupSettings = settings.getGroups(THREADPOOL_GROUP); - validate(groupSettings); - return groupSettings; - } - - public void setNodeSettingsService(NodeSettingsService nodeSettingsService) { - if(settingsListenerIsSet) { + public void setClusterSettings(ClusterSettings clusterSettings) { + if(settingsListenerIsSet.compareAndSet(false, true)) { + clusterSettings.addSettingsUpdateConsumer(THREADPOOL_GROUP_SETTING, this::updateSettings, (s) -> validate(s.getAsGroups())); + } else { throw new IllegalStateException("the node settings listener was set more than once"); } - nodeSettingsService.addListener(new ApplySettings()); - settingsListenerIsSet = true; } public long estimatedTimeInMillis() { @@ -526,8 +518,8 @@ public class ThreadPool extends AbstractComponent { throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); } - public void updateSettings(Settings settings) { - Map<String, Settings> groupSettings = getThreadPoolSettingsGroup(settings); + private void updateSettings(Settings settings) { + Map<String, Settings> groupSettings = settings.getAsGroups(); if (groupSettings.isEmpty()) { return; } @@ -583,7 +575,7 @@ public class ThreadPool extends AbstractComponent { ThreadPoolType correctThreadPoolType = THREAD_POOL_TYPES.get(key); // TODO: the type equality check can be removed after #3760/#6732 are addressed if (type != null && !correctThreadPoolType.getType().equals(type)) { - throw new IllegalArgumentException("setting " + THREADPOOL_GROUP + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); + throw new IllegalArgumentException("setting " + THREADPOOL_GROUP_SETTING.getKey() + key + ".type to " + type + " is not permitted; must be " + correctThreadPoolType.getType()); } } } @@ -866,13 +858,6 @@ public class ThreadPool extends AbstractComponent { } - class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - updateSettings(settings); - } - } - /** * Returns true if the given service was terminated successfully. If the termination timed out * or the service is null, this method will return false.
@@ -911,38 +896,4 @@ public class ThreadPool extends AbstractComponent { } return false; } - - public static ThreadPoolTypeSettingsValidator THREAD_POOL_TYPE_SETTINGS_VALIDATOR = new ThreadPoolTypeSettingsValidator(); - private static class ThreadPoolTypeSettingsValidator implements Validator { - @Override - public String validate(String setting, String value, ClusterState clusterState) { - // TODO: the type equality validation can be removed after #3760/#6732 are addressed - Matcher matcher = Pattern.compile("threadpool\\.(.*)\\.type").matcher(setting); - if (!matcher.matches()) { - return null; - } else { - String threadPool = matcher.group(1); - ThreadPool.ThreadPoolType defaultThreadPoolType = ThreadPool.THREAD_POOL_TYPES.get(threadPool); - ThreadPool.ThreadPoolType threadPoolType; - try { - threadPoolType = ThreadPool.ThreadPoolType.fromType(value); - } catch (IllegalArgumentException e) { - return e.getMessage(); - } - if (defaultThreadPoolType.equals(threadPoolType)) { - return null; - } else { - return String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - threadPool, - defaultThreadPoolType.getType(), - threadPoolType.getType() - ); - } - } - - } - } - } diff --git a/core/src/main/java/org/elasticsearch/transport/Transport.java b/core/src/main/java/org/elasticsearch/transport/Transport.java index 10fa9b239dc..78b07e3aae3 100644 --- a/core/src/main/java/org/elasticsearch/transport/Transport.java +++ b/core/src/main/java/org/elasticsearch/transport/Transport.java @@ -21,6 +21,8 @@ package org.elasticsearch.transport; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.LifecycleComponent; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -34,9 +36,8 @@ import java.util.Map; public interface Transport extends LifecycleComponent { - public static class TransportSettings { - public static final String TRANSPORT_TCP_COMPRESS = "transport.tcp.compress"; - } + Setting TRANSPORT_PROFILES_SETTING = Setting.groupSetting("transport.profiles.", true, Setting.Scope.CLUSTER); + Setting TRANSPORT_TCP_COMPRESS = Setting.boolSetting("transport.tcp.compress", false, false, Setting.Scope.CLUSTER); void transportServiceAdapter(TransportServiceAdapter service); diff --git a/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java new file mode 100644 index 00000000000..8c042cd1937 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/transport/TransportChannelResponseHandler.java @@ -0,0 +1,76 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; + +/** + * Base class for delegating transport response to a transport channel + */ +public abstract class TransportChannelResponseHandler implements TransportResponseHandler { + + /** + * Convenience method for delegating an empty response to the provided channel + */ + public static TransportChannelResponseHandler emptyResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + return new TransportChannelResponseHandler(logger, channel, extraInfoOnError) { + @Override + public TransportResponse.Empty newInstance() { + return TransportResponse.Empty.INSTANCE; + } + }; + } + + private final ESLogger logger; + private final TransportChannel channel; + private final String extraInfoOnError; + + protected TransportChannelResponseHandler(ESLogger logger, TransportChannel channel, String extraInfoOnError) { + this.logger = logger; + this.channel = channel; + this.extraInfoOnError = extraInfoOnError; + } + + @Override + public void handleResponse(T response) { + try { + channel.sendResponse(response); + } catch (IOException e) { + handleException(new TransportException(e)); + } + } + + @Override + public void handleException(TransportException exp) { + try { + channel.sendResponse(exp); + } catch (IOException e) { + logger.debug("failed to send failure {}", e, extraInfoOnError == null ? "" : "(" + extraInfoOnError + ")"); + } + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } +} diff --git a/core/src/main/java/org/elasticsearch/transport/TransportModule.java b/core/src/main/java/org/elasticsearch/transport/TransportModule.java deleted file mode 100644 index abf90deee81..00000000000 --- a/core/src/main/java/org/elasticsearch/transport/TransportModule.java +++ /dev/null @@ -1,122 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
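A hypothetical call site for the TransportChannelResponseHandler added above; the method and its parameters are illustrative assumptions, not code from this patch. It delegates a request to another node and proxies the (empty) response, or any failure, back over the channel that carried the original request.

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;

void delegate(TransportService transportService, DiscoveryNode otherNode,
              TransportRequest request, TransportChannel channel, ESLogger logger) {
    String action = "internal:example/delegate"; // illustrative action name
    transportService.sendRequest(otherNode, action, request,
            TransportChannelResponseHandler.emptyResponseHandler(
                    logger, channel, "failed to delegate [" + action + "]"));
}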
- */ - -package org.elasticsearch.transport; - -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.transport.local.LocalTransport; -import org.elasticsearch.transport.netty.NettyTransport; - -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; - -/** - * - */ -public class TransportModule extends AbstractModule { - - public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type"; - - public static final String LOCAL_TRANSPORT = "local"; - public static final String NETTY_TRANSPORT = "netty"; - - private final ESLogger logger; - private final Settings settings; - - private final Map> transportServices = new HashMap<>(); - private final Map> transports = new HashMap<>(); - private Class configuredTransportService; - private Class configuredTransport; - private String configuredTransportServiceSource; - private String configuredTransportSource; - - public TransportModule(Settings settings) { - this.settings = settings; - this.logger = Loggers.getLogger(getClass(), settings); - addTransport(LOCAL_TRANSPORT, LocalTransport.class); - addTransport(NETTY_TRANSPORT, NettyTransport.class); - } - - public void addTransportService(String name, Class clazz) { - Class oldClazz = transportServices.put(name, clazz); - if (oldClazz != null) { - throw new IllegalArgumentException("Cannot register TransportService [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - public void addTransport(String name, Class clazz) { - Class oldClazz = transports.put(name, clazz); - if (oldClazz != null) { - throw new IllegalArgumentException("Cannot register Transport [" + name + "] to " + clazz.getName() + ", already registered to " + oldClazz.getName()); - } - } - - @Override - protected void configure() { - if (configuredTransportService != null) { - logger.info("Using [{}] as transport service, overridden by [{}]", configuredTransportService.getName(), configuredTransportServiceSource); - bind(TransportService.class).to(configuredTransportService).asEagerSingleton(); - } else { - String typeName = settings.get(TRANSPORT_SERVICE_TYPE_KEY); - if (typeName == null) { - bind(TransportService.class).asEagerSingleton(); - } else { - if (transportServices.containsKey(typeName) == false) { - throw new IllegalArgumentException("Unknown TransportService type [" + typeName + "], known types are: " + transportServices.keySet()); - } - bind(TransportService.class).to(transportServices.get(typeName)).asEagerSingleton(); - } - } - - bind(NamedWriteableRegistry.class).asEagerSingleton(); - if (configuredTransport != null) { - logger.info("Using [{}] as transport, overridden by [{}]", configuredTransport.getName(), configuredTransportSource); - bind(Transport.class).to(configuredTransport).asEagerSingleton(); - } else { - String defaultType = DiscoveryNode.localNode(settings) ? 
LOCAL_TRANSPORT : NETTY_TRANSPORT; - String typeName = settings.get(TRANSPORT_TYPE_KEY, defaultType); - Class clazz = transports.get(typeName); - if (clazz == null) { - throw new IllegalArgumentException("Unknown Transport [" + typeName + "]"); - } - bind(Transport.class).to(clazz).asEagerSingleton(); - } - } - - public void setTransportService(Class transportService, String source) { - Objects.requireNonNull(transportService, "Configured transport service may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport service may not be null"); - this.configuredTransportService = transportService; - this.configuredTransportServiceSource = source; - } - - public void setTransport(Class transport, String source) { - Objects.requireNonNull(transport, "Configured transport may not be null"); - Objects.requireNonNull(source, "Plugin, that changes transport may not be null"); - this.configuredTransport = transport; - this.configuredTransportSource = source; - } -} \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/transport/TransportService.java b/core/src/main/java/org/elasticsearch/transport/TransportService.java index 14fc9029b00..444f52b9c03 100644 --- a/core/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/core/src/main/java/org/elasticsearch/transport/TransportService.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.regex.Regex; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -37,20 +39,15 @@ import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ConcurrentMapLong; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.FutureUtils; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Callable; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; import java.util.function.Supplier; import static org.elasticsearch.common.settings.Settings.Builder.EMPTY_SETTINGS; @@ -88,14 +85,14 @@ public class TransportService extends AbstractLifecycleComponent> TRACE_LOG_INCLUDE_SETTING = Setting.listSetting("transport.tracer.include", Collections.emptyList(), Function.identity(), true, Setting.Scope.CLUSTER); + public static final Setting> TRACE_LOG_EXCLUDE_SETTING = Setting.listSetting("transport.tracer.exclude", Arrays.asList("internal:discovery/zen/fd*", TransportLivenessAction.NAME), Function.identity(), true, Setting.Scope.CLUSTER); + private final ESLogger tracerLog; volatile String[] tracerLogInclude; volatile String[] tracelLogExclude; - private final ApplySettings settingsListener = new ApplySettings(); /** if set will call requests sent to this id to shortcut and 
executed locally */ volatile DiscoveryNode localNode = null; @@ -109,8 +106,8 @@ public class TransportService extends AbstractLifecycleComponent tracerLogInclude) { + this.tracerLogInclude = tracerLogInclude.toArray(Strings.EMPTY_ARRAY); } - // used for testing - public void applySettings(Settings settings) { - settingsListener.onRefreshSettings(settings); + void setTracerLogExclude(List tracelLogExclude) { + this.tracelLogExclude = tracelLogExclude.toArray(Strings.EMPTY_ARRAY); } - @Override protected void doStart() { adapter.rxMetric.clear(); diff --git a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index ab39a35d224..6a6a6c38011 100644 --- a/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/core/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -87,6 +87,7 @@ import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory; import org.jboss.netty.util.HashedWheelTimer; import java.io.IOException; +import java.net.BindException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -224,7 +225,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem this.connectTimeout = this.settings.getAsTime("transport.netty.connect_timeout", settings.getAsTime("transport.tcp.connect_timeout", settings.getAsTime(TCP_CONNECT_TIMEOUT, TCP_DEFAULT_CONNECT_TIMEOUT))); this.maxCumulationBufferCapacity = this.settings.getAsBytesSize("transport.netty.max_cumulation_buffer_capacity", null); this.maxCompositeBufferComponents = this.settings.getAsInt("transport.netty.max_composite_buffer_components", -1); - this.compress = settings.getAsBoolean(TransportSettings.TRANSPORT_TCP_COMPRESS, false); + this.compress = Transport.TRANSPORT_TCP_COMPRESS.get(settings); this.connectionsPerNodeRecovery = this.settings.getAsInt("transport.netty.connections_per_node.recovery", settings.getAsInt(CONNECTIONS_PER_NODE_RECOVERY, 2)); this.connectionsPerNodeBulk = this.settings.getAsInt("transport.netty.connections_per_node.bulk", settings.getAsInt(CONNECTIONS_PER_NODE_BULK, 3)); @@ -294,7 +295,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem this.serverOpenChannels = openChannels; // extract default profile first and create standard bootstrap - Map profiles = settings.getGroups("transport.profiles", true); + Map profiles = TRANSPORT_PROFILES_SETTING.get(settings()).getAsGroups(true); if (!profiles.containsKey(DEFAULT_PROFILE)) { profiles = new HashMap<>(profiles); profiles.put(DEFAULT_PROFILE, Settings.EMPTY); @@ -763,6 +764,11 @@ public class NettyTransport extends AbstractLifecycleComponent implem // close the channel as safe measure, which will cause a node to be disconnected if relevant ctx.getChannel().close(); disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); + } else if (e.getCause() instanceof BindException) { + logger.trace("bind exception caught on transport layer [{}]", e.getCause(), ctx.getChannel()); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + ctx.getChannel().close(); + disconnectFromNodeChannel(ctx.getChannel(), e.getCause()); } else if (e.getCause() instanceof CancelledKeyException) { logger.trace("cancelled key exception caught on transport layer [{}], disconnecting from relevant node", e.getCause(), ctx.getChannel()); // close the channel as safe measure, which will cause a node to be disconnected if 
relevant diff --git a/core/src/main/java/org/elasticsearch/tribe/TribeService.java b/core/src/main/java/org/elasticsearch/tribe/TribeService.java index f577415ee6b..78453c9eac6 100644 --- a/core/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/core/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -26,7 +26,8 @@ import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.ClusterStateTaskConfig; +import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -36,6 +37,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; @@ -205,142 +207,180 @@ public class TribeService extends AbstractLifecycleComponent { } } - class TribeClusterStateListener implements ClusterStateListener { + class TribeClusterStateListener implements ClusterStateListener { private final String tribeName; + private final TribeNodeClusterStateTaskExecutor executor; TribeClusterStateListener(Node tribeNode) { - this.tribeName = tribeNode.settings().get(TRIBE_NAME); + String tribeName = tribeNode.settings().get(TRIBE_NAME); + this.tribeName = tribeName; + executor = new TribeNodeClusterStateTaskExecutor(tribeName); } @Override public void clusterChanged(final ClusterChangedEvent event) { logger.debug("[{}] received cluster event, [{}]", tribeName, event.source()); - clusterService.submitStateUpdateTask("cluster event from " + tribeName + ", " + event.source(), new ClusterStateUpdateTask() { - @Override - public boolean runOnlyOnMaster() { - return false; + clusterService.submitStateUpdateTask( + "cluster event from " + tribeName + ", " + event.source(), + event, + ClusterStateTaskConfig.build(Priority.NORMAL), + executor, + (source, t) -> logger.warn("failed to process [{}]", t, source)); + } + } + + class TribeNodeClusterStateTaskExecutor implements ClusterStateTaskExecutor { + private final String tribeName; + + TribeNodeClusterStateTaskExecutor(String tribeName) { + this.tribeName = tribeName; + } + + + @Override + public boolean runOnlyOnMaster() { + return false; + } + + @Override + public BatchResult execute(ClusterState currentState, List tasks) throws Exception { + ClusterState accumulator = ClusterState.builder(currentState).build(); + BatchResult.Builder builder = BatchResult.builder(); + + try { + // we only need to apply the latest cluster state update + accumulator = applyUpdate(accumulator, tasks.get(tasks.size() - 1)); + builder.successes(tasks); + } catch (Throwable t) { + builder.failures(tasks, t); + } + + return builder.build(accumulator); + } + + private ClusterState applyUpdate(ClusterState currentState, ClusterChangedEvent task) { + boolean clusterStateChanged = false; + ClusterState tribeState = task.state(); + DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); + // -- merge nodes + // go over 
existing nodes, and see if they need to be removed + for (DiscoveryNode discoNode : currentState.nodes()) { + String markedTribeName = discoNode.attributes().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + if (tribeState.nodes().get(discoNode.id()) == null) { + clusterStateChanged = true; + logger.info("[{}] removing node [{}]", tribeName, discoNode); + nodes.remove(discoNode.id()); + } } + } + // go over tribe nodes, and see if they need to be added + for (DiscoveryNode tribe : tribeState.nodes()) { + if (currentState.nodes().get(tribe.id()) == null) { + // a new node, add it, but also add the tribe name to the attributes + Map tribeAttr = new HashMap<>(); + for (ObjectObjectCursor attr : tribe.attributes()) { + tribeAttr.put(attr.key, attr.value); + } + tribeAttr.put(TRIBE_NAME, tribeName); + DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); + clusterStateChanged = true; + logger.info("[{}] adding node [{}]", tribeName, discoNode); + nodes.put(discoNode); + } + } - @Override - public ClusterState execute(ClusterState currentState) throws Exception { - ClusterState tribeState = event.state(); - DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(currentState.nodes()); - // -- merge nodes - // go over existing nodes, and see if they need to be removed - for (DiscoveryNode discoNode : currentState.nodes()) { - String markedTribeName = discoNode.attributes().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - if (tribeState.nodes().get(discoNode.id()) == null) { - logger.info("[{}] removing node [{}]", tribeName, discoNode); - nodes.remove(discoNode.id()); - } - } + // -- merge metadata + ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); + MetaData.Builder metaData = MetaData.builder(currentState.metaData()); + RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); + // go over existing indices, and see if they need to be removed + for (IndexMetaData index : currentState.metaData()) { + String markedTribeName = index.getSettings().get(TRIBE_NAME); + if (markedTribeName != null && markedTribeName.equals(tribeName)) { + IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); + clusterStateChanged = true; + if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { + logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); + removeIndex(blocks, metaData, routingTable, index); + } else { + // always make sure to update the metadata and routing table, in case + // there are changes in them (new mapping, shards moving from initializing to started) + routingTable.add(tribeState.routingTable().index(index.getIndex())); + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); } - // go over tribe nodes, and see if they need to be added - for (DiscoveryNode tribe : tribeState.nodes()) { - if (currentState.nodes().get(tribe.id()) == null) { - // a new node, add it, but also add the tribe name to the attributes - Map tribeAttr = new HashMap<>(); - for (ObjectObjectCursor attr : tribe.attributes()) { - tribeAttr.put(attr.key, attr.value); - } - tribeAttr.put(TRIBE_NAME, tribeName); - DiscoveryNode discoNode = new DiscoveryNode(tribe.name(), tribe.id(), 
tribe.getHostName(), tribe.getHostAddress(), tribe.address(), unmodifiableMap(tribeAttr), tribe.version()); - logger.info("[{}] adding node [{}]", tribeName, discoNode); - nodes.put(discoNode); - } + } + } + // go over tribe indices, and see if they need to be added + for (IndexMetaData tribeIndex : tribeState.metaData()) { + // if there is no routing table yet, do nothing with it... + IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); + if (table == null) { + continue; + } + final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); + if (indexMetaData == null) { + if (!droppedIndices.contains(tribeIndex.getIndex())) { + // a new index, add it, and add the tribe name as a setting + clusterStateChanged = true; + logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); } - - // -- merge metadata - ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - MetaData.Builder metaData = MetaData.builder(currentState.metaData()); - RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); - // go over existing indices, and see if they need to be removed - for (IndexMetaData index : currentState.metaData()) { - String markedTribeName = index.getSettings().get(TRIBE_NAME); - if (markedTribeName != null && markedTribeName.equals(tribeName)) { - IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex()); - if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) { - logger.info("[{}] removing index [{}]", tribeName, index.getIndex()); - removeIndex(blocks, metaData, routingTable, index); - } else { - // always make sure to update the metadata and routing table, in case - // there are changes in them (new mapping, shards moving from initializing to started) - routingTable.add(tribeState.routingTable().index(index.getIndex())); - Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - } - } - } - // go over tribe one, and see if they need to be added - for (IndexMetaData tribeIndex : tribeState.metaData()) { - // if there is no routing table yet, do nothing with it... - IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex()); - if (table == null) { - continue; - } - final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex()); - if (indexMetaData == null) { - if (!droppedIndices.contains(tribeIndex.getIndex())) { - // a new index, add it, and add the tribe name as a setting - logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex()); + } else { + String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); + if (!tribeName.equals(existingFromTribe)) { + // we have a potential conflict on index names, decide what to do... + if (ON_CONFLICT_ANY.equals(onConflict)) { + // we chose any tribe, carry on + } else if (ON_CONFLICT_DROP.equals(onConflict)) { + // drop the indices, there is a conflict + clusterStateChanged = true; + logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); + droppedIndices.add(tribeIndex.getIndex()); + } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { + // on conflict, prefer a tribe...
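Stepping back from the conflict handling for a moment: the restructuring above moves this merge logic out of a per-event anonymous ClusterStateUpdateTask and into one shared ClusterStateTaskExecutor, so queued tribe events are batched and only the newest needs to be applied. A minimal sketch of that executor contract, assembled from the BatchResult API shown in this patch; the class name and the no-op apply method are illustrative.

import java.util.List;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;

class LatestEventExecutor implements ClusterStateTaskExecutor<ClusterChangedEvent> {

    @Override
    public BatchResult<ClusterChangedEvent> execute(ClusterState currentState,
                                                    List<ClusterChangedEvent> tasks) throws Exception {
        BatchResult.Builder<ClusterChangedEvent> builder = BatchResult.builder();
        try {
            // every task queued against this executor instance arrives in one
            // call, so applying the most recent event covers the older ones
            ClusterState newState = apply(currentState, tasks.get(tasks.size() - 1));
            builder.successes(tasks);
            return builder.build(newState);
        } catch (Throwable t) {
            builder.failures(tasks, t);
            return builder.build(currentState);
        }
    }

    @Override
    public boolean runOnlyOnMaster() {
        return false; // tribe listeners must also run on non-master nodes
    }

    private ClusterState apply(ClusterState state, ClusterChangedEvent latest) {
        return state; // a real executor merges nodes, metadata and routing here
    }
}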
+ String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); + if (tribeName.equals(preferredTribeName)) { + // the new one is the preferred one, replace... + clusterStateChanged = true; + logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); + removeIndex(blocks, metaData, routingTable, tribeIndex); addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } - } else { - String existingFromTribe = indexMetaData.getSettings().get(TRIBE_NAME); - if (!tribeName.equals(existingFromTribe)) { - // we have a potential conflict on index names, decide what to do... - if (ON_CONFLICT_ANY.equals(onConflict)) { - // we chose any tribe, carry on - } else if (ON_CONFLICT_DROP.equals(onConflict)) { - // drop the indices, there is a conflict - logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - droppedIndices.add(tribeIndex.getIndex()); - } else if (onConflict.startsWith(ON_CONFLICT_PREFER)) { - // on conflict, prefer a tribe... - String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length()); - if (tribeName.equals(preferredTribeName)) { - // the new one is hte preferred one, replace... - logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe); - removeIndex(blocks, metaData, routingTable, tribeIndex); - addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex); - } // else: either the existing one is the preferred one, or we haven't seen one, carry on - } - } + } // else: either the existing one is the preferred one, or we haven't seen one, carry on } } - - return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); } + } - private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { - metaData.remove(index.getIndex()); - routingTable.remove(index.getIndex()); - blocks.removeIndexBlocks(index.getIndex()); - } + if (!clusterStateChanged) { + return currentState; + } else { + return ClusterState.builder(currentState).incrementVersion().blocks(blocks).nodes(nodes).metaData(metaData).routingTable(routingTable.build()).build(); + } + } - private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { - Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); - metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); - routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); - if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); - } - if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); - } - if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { - blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); - } - } + private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) { + metaData.remove(index.getIndex()); + routingTable.remove(index.getIndex()); +
blocks.removeIndexBlocks(index.getIndex()); + } - @Override - public void onFailure(String source, Throwable t) { - logger.warn("failed to process [{}]", t, source); - } - }); + private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) { + Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build(); + metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings)); + routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex())); + if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK); + } + if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK); + } + if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) { + blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK); + } } } } diff --git a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help index 2a4e6a6382c..8c73e3837a4 100644 --- a/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help +++ b/core/src/main/resources/org/elasticsearch/plugins/plugin-install.help @@ -50,6 +50,7 @@ OFFICIAL PLUGINS - mapper-murmur3 - mapper-size - repository-azure + - repository-hdfs - repository-s3 - store-smb diff --git a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java index 0f29ed5a2f7..a287ec119e7 100644 --- a/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java +++ b/core/src/test/java/org/apache/lucene/queries/BlendedTermQueryTests.java @@ -23,19 +23,8 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.MultiReader; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.BooleanClause; -import org.apache.lucene.search.BooleanQuery; -import org.apache.lucene.search.DisjunctionMaxQuery; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryUtils; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.TermQuery; -import org.apache.lucene.search.TopDocs; +import org.apache.lucene.index.*; +import org.apache.lucene.search.*; import org.apache.lucene.search.similarities.BM25Similarity; import org.apache.lucene.search.similarities.DefaultSimilarity; import org.apache.lucene.search.similarities.Similarity; @@ -44,11 +33,7 @@ import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ESTestCase; import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; diff --git a/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java new file mode 
100644 index 00000000000..bd1377b89fe --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.admin.cluster.settings; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.concurrent.atomic.AtomicReference; + +public class SettingsUpdaterTests extends ESTestCase { + + + public void testUpdateSetting() { + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4).build()); + assertNotSame(clusterState, build); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 0.5, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().transientSettings()), 4.5, 0.1); + + clusterState = updater.updateSettings(clusterState, 
Settings.builder().putNull("cluster.routing.*").build(), + Settings.EMPTY); + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); + assertEquals(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 2.5, 0.1); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + + clusterState = updater.updateSettings(clusterState, + Settings.EMPTY, Settings.builder().putNull("cluster.routing.*").put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build()); + + assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 10.0, 0.1); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().persistentSettings())); + assertFalse(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testAllOrNothing() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + try { + updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + fail("all or nothing"); + } catch (IllegalArgumentException ex) { + logger.info("", ex); + assertEquals("Failed to parse value [not a float] for setting [cluster.routing.allocation.balance.index]", ex.getMessage()); + } + assertNull("updater only does a dryRun", index.get()); + assertNull("updater only does a dryRun", shard.get()); + } + + public void testClusterBlock() { + ClusterState.Builder builder = ClusterState.builder(new ClusterName("foo")); + ClusterSettings settingsService = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AtomicReference index = new AtomicReference<>(); + AtomicReference shard = new AtomicReference<>(); + 
settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING, index::set); + settingsService.addSettingsUpdateConsumer(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING, shard::set); + SettingsUpdater updater = new SettingsUpdater(settingsService); + MetaData.Builder metaData = MetaData.builder() + .persistentSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 2.5).build()) + .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); + ClusterState build = builder.metaData(metaData).build(); + + ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build()); + assertEquals(clusterState.blocks().global().size(), 1); + assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_BLOCK); + + clusterState = updater.updateSettings(build, Settings.EMPTY, + Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build()); + assertEquals(clusterState.blocks().global().size(), 0); + + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index bb154218215..3ce9e99f4dc 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -285,4 +285,11 @@ public class CreateIndexIT extends ESIntegTestCase { assertThat(messages.toString(), containsString("mapper [text] is used by multiple types")); } } + + public void testRestartIndexCreationAfterFullClusterRestart() throws Exception { + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")).get(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).get(); + internalCluster().fullRestart(); + ensureGreen("test"); + } } diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java similarity index 51% rename from core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java rename to core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java index 19cce93c6e4..dfc6ea67c49 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUnitTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/flush/SyncedFlushUnitTests.java @@ -17,16 +17,20 @@ * under the License. 
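The SettingsUpdater tests above pin down behavior that surfaces through the cluster-update-settings API: transient and persistent sections are validated and applied as one unit, and a null value combined with a wildcard clears every matching key. A hedged sketch of the matching client calls, assuming the client() helper of an ESIntegTestCase like the surrounding tests; the 0.5f value is illustrative.

// set one dynamic balance factor transiently
client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
                .put("cluster.routing.allocation.balance.index", 0.5f))
        .get();

// reset the whole group: putNull plus a wildcard removes every matching key
client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder().putNull("cluster.routing.*"))
        .get();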
*/ -package org.elasticsearch.indices.flush; +package org.elasticsearch.action.admin.indices.flush; import com.carrotsearch.hppc.ObjectIntHashMap; import com.carrotsearch.hppc.ObjectIntMap; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse.ShardCounts; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.flush.IndicesSyncedFlushResult.ShardCounts; -import org.elasticsearch.indices.flush.SyncedFlushService.SyncedFlushResponse; +import org.elasticsearch.indices.flush.ShardsSyncedFlushResult; +import org.elasticsearch.indices.flush.SyncedFlushService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -42,14 +46,11 @@ import static org.hamcrest.Matchers.hasSize; public class SyncedFlushUnitTests extends ESTestCase { - private static class TestPlan { - public ShardCounts totalCounts; - public Map countsPerIndex = new HashMap<>(); + public SyncedFlushResponse.ShardCounts totalCounts; + public Map countsPerIndex = new HashMap<>(); public ObjectIntMap expectedFailuresPerIndex = new ObjectIntHashMap<>(); - - public IndicesSyncedFlushResult result; - + public SyncedFlushResponse result; } public void testIndicesSyncedFlushResult() throws IOException { @@ -76,6 +77,56 @@ public class SyncedFlushUnitTests extends ESTestCase { } } + public void testResponseStreaming() throws IOException { + final TestPlan testPlan = createTestPlan(); + assertThat(testPlan.result.totalShards(), equalTo(testPlan.totalCounts.total)); + assertThat(testPlan.result.successfulShards(), equalTo(testPlan.totalCounts.successful)); + assertThat(testPlan.result.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(testPlan.result.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? RestStatus.CONFLICT : RestStatus.OK)); + BytesStreamOutput out = new BytesStreamOutput(); + testPlan.result.writeTo(out); + StreamInput in = StreamInput.wrap(out.bytes()); + SyncedFlushResponse readResponse = new SyncedFlushResponse(); + readResponse.readFrom(in); + assertThat(readResponse.totalShards(), equalTo(testPlan.totalCounts.total)); + assertThat(readResponse.successfulShards(), equalTo(testPlan.totalCounts.successful)); + assertThat(readResponse.failedShards(), equalTo(testPlan.totalCounts.failed)); + assertThat(readResponse.restStatus(), equalTo(testPlan.totalCounts.failed > 0 ? 
RestStatus.CONFLICT : RestStatus.OK)); + assertThat(readResponse.shardsResultPerIndex.size(), equalTo(testPlan.result.getShardsResultPerIndex().size())); + for (Map.Entry> entry : readResponse.getShardsResultPerIndex().entrySet()) { + List originalShardsResults = testPlan.result.getShardsResultPerIndex().get(entry.getKey()); + assertNotNull(originalShardsResults); + List readShardsResults = entry.getValue(); + assertThat(readShardsResults.size(), equalTo(originalShardsResults.size())); + for (int i = 0; i < readShardsResults.size(); i++) { + ShardsSyncedFlushResult originalShardResult = originalShardsResults.get(i); + ShardsSyncedFlushResult readShardResult = readShardsResults.get(i); + assertThat(originalShardResult.failureReason(), equalTo(readShardResult.failureReason())); + assertThat(originalShardResult.failed(), equalTo(readShardResult.failed())); + assertThat(originalShardResult.getShardId(), equalTo(readShardResult.getShardId())); + assertThat(originalShardResult.successfulShards(), equalTo(readShardResult.successfulShards())); + assertThat(originalShardResult.syncId(), equalTo(readShardResult.syncId())); + assertThat(originalShardResult.totalShards(), equalTo(readShardResult.totalShards())); + assertThat(originalShardResult.failedShards().size(), equalTo(readShardResult.failedShards().size())); + for (Map.Entry shardEntry : originalShardResult.failedShards().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.failedShards().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + assertThat(originalShardResult.shardResponses().size(), equalTo(readShardResult.shardResponses().size())); + for (Map.Entry shardEntry : originalShardResult.shardResponses().entrySet()) { + SyncedFlushService.ShardSyncedFlushResponse readShardResponse = readShardResult.shardResponses().get(shardEntry.getKey()); + assertNotNull(readShardResponse); + SyncedFlushService.ShardSyncedFlushResponse originalShardResponse = shardEntry.getValue(); + assertThat(originalShardResponse.failureReason(), equalTo(readShardResponse.failureReason())); + assertThat(originalShardResponse.success(), equalTo(readShardResponse.success())); + } + } + } + } + private void assertShardCount(String name, Map header, ShardCounts expectedCounts) { assertThat(name + " has unexpected total count", (Integer) header.get("total"), equalTo(expectedCounts.total)); assertThat(name + " has unexpected successful count", (Integer) header.get("successful"), equalTo(expectedCounts.successful)); @@ -105,32 +156,33 @@ public class SyncedFlushUnitTests extends ESTestCase { failures++; shardsResults.add(new ShardsSyncedFlushResult(shardId, replicas + 1, "simulated total failure")); } else { - Map shardResponses = new HashMap<>(); + Map shardResponses = new HashMap<>(); for (int copy = 0; copy < replicas + 1; copy++) { final ShardRouting shardRouting = TestShardRouting.newShardRouting(index, shard, "node_" + shardId + "_" + copy, null, - copy == 0, ShardRoutingState.STARTED, 0); + copy == 0, ShardRoutingState.STARTED, 0); if (randomInt(5) < 2) { // shard copy failure failed++; failures++; - shardResponses.put(shardRouting, new SyncedFlushResponse("copy failure " + shardId)); + shardResponses.put(shardRouting, new 
SyncedFlushService.ShardSyncedFlushResponse("copy failure " + shardId)); } else { successful++; - shardResponses.put(shardRouting, new SyncedFlushResponse()); + shardResponses.put(shardRouting, new SyncedFlushService.ShardSyncedFlushResponse()); } } shardsResults.add(new ShardsSyncedFlushResult(shardId, "_sync_id_" + shard, replicas + 1, shardResponses)); } } indicesResults.put(index, shardsResults); - testPlan.countsPerIndex.put(index, new ShardCounts(shards * (replicas + 1), successful, failed)); + testPlan.countsPerIndex.put(index, new SyncedFlushResponse.ShardCounts(shards * (replicas + 1), successful, failed)); testPlan.expectedFailuresPerIndex.put(index, failures); totalFailed += failed; totalShards += shards * (replicas + 1); totalSuccesful += successful; } - testPlan.result = new IndicesSyncedFlushResult(indicesResults); - testPlan.totalCounts = new ShardCounts(totalShards, totalSuccesful, totalFailed); + testPlan.result = new SyncedFlushResponse(indicesResults); + testPlan.totalCounts = new SyncedFlushResponse.ShardCounts(totalShards, totalSuccesful, totalFailed); return testPlan; } + } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index ffb9e630b70..ebd32ccb482 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -87,6 +87,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { for (ObjectCursor> shardStoreStatuses : shardStores.values()) { for (IndicesShardStoresResponse.StoreStatus storeStatus : shardStoreStatuses.value) { assertThat(storeStatus.getVersion(), greaterThan(-1l)); + assertThat(storeStatus.getAllocationId(), notNullValue()); assertThat(storeStatus.getNode(), notNullValue()); assertThat(storeStatus.getStoreException(), nullValue()); } @@ -108,7 +109,7 @@ public class IndicesShardStoreRequestIT extends ESIntegTestCase { assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); for (IntObjectCursor> storesStatus : shardStoresStatuses) { assertThat("must report for one store", storesStatus.value.size(), equalTo(1)); - assertThat("reported store should be primary", storesStatus.value.get(0).getAllocation(), equalTo(IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY)); + assertThat("reported store should be primary", storesStatus.value.get(0).getAllocationStatus(), equalTo(IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY)); } logger.info("--> enable allocation"); enableAllocation(index); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java index c862583a3f7..6d1159c82a5 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreResponseTests.java @@ -22,24 +22,17 @@ package org.elasticsearch.action.admin.indices.shards; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import 
org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.transport.DummyTransportAddress; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.NodeDisconnectedException; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.hamcrest.Matchers.equalTo; @@ -52,9 +45,9 @@ public class IndicesShardStoreResponseTests extends ESTestCase { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); DiscoveryNode node2 = new DiscoveryNode("node2", DummyTransportAddress.INSTANCE, Version.CURRENT); List storeStatusList = new ArrayList<>(); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, new IOException("corrupted"))); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, null, IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node2, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + storeStatusList.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, new IOException("corrupted"))); storeStatuses.put(0, storeStatusList); storeStatuses.put(1, storeStatusList); ImmutableOpenIntMap> storesMap = storeStatuses.build(); @@ -97,8 +90,10 @@ public class IndicesShardStoreResponseTests extends ESTestCase { IndicesShardStoresResponse.StoreStatus storeStatus = storeStatusList.get(i); assertThat(storeInfo.containsKey("version"), equalTo(true)); assertThat(((int) storeInfo.get("version")), equalTo(((int) storeStatus.getVersion()))); + assertThat(storeInfo.containsKey("allocation_id"), equalTo(true)); + assertThat(((String) storeInfo.get("allocation_id")), equalTo((storeStatus.getAllocationId()))); assertThat(storeInfo.containsKey("allocation"), equalTo(true)); - assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocation().value())); + assertThat(((String) storeInfo.get("allocation")), equalTo(storeStatus.getAllocationStatus().value())); assertThat(storeInfo.containsKey(storeStatus.getNode().id()), equalTo(true)); if (storeStatus.getStoreException() != null) { assertThat(storeInfo.containsKey("store_exception"), equalTo(true)); @@ -112,14 +107,14 @@ public class IndicesShardStoreResponseTests extends ESTestCase { public void testStoreStatusOrdering() throws Exception { DiscoveryNode node1 = new DiscoveryNode("node1", DummyTransportAddress.INSTANCE, Version.CURRENT); List orderedStoreStatuses = new ArrayList<>(); - orderedStoreStatuses.add(new 
IndicesShardStoresResponse.StoreStatus(node1, 2, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.PRIMARY, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, IndicesShardStoresResponse.StoreStatus.Allocation.UNUSED, null)); - orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, IndicesShardStoresResponse.StoreStatus.Allocation.REPLICA, new IOException("corrupted"))); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 2, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.PRIMARY, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 1, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.UNUSED, null)); + orderedStoreStatuses.add(new IndicesShardStoresResponse.StoreStatus(node1, 3, Strings.randomBase64UUID(), IndicesShardStoresResponse.StoreStatus.AllocationStatus.REPLICA, new IOException("corrupted"))); List storeStatuses = new ArrayList<>(orderedStoreStatuses); - Collections.shuffle(storeStatuses); + Collections.shuffle(storeStatuses, random()); CollectionUtil.timSort(storeStatuses); assertThat(storeStatuses, equalTo(orderedStoreStatuses)); } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java index 143300ecc07..a38a46d0deb 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/upgrade/UpgradeIT.java @@ -65,7 +65,7 @@ public class UpgradeIT extends ESBackcompatTestCase { public void testUpgrade() throws Exception { // allow the cluster to rebalance quickly - 2 concurrent rebalances are the default; we can go higher Settings.Builder builder = Settings.builder(); - builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 100); + builder.put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 100); client().admin().cluster().prepareUpdateSettings().setPersistentSettings(builder).get(); int numIndexes = randomIntBetween(2, 4); @@ -117,13 +117,13 @@ public class UpgradeIT extends ESBackcompatTestCase { ensureGreen(); // disable allocation entirely until all nodes are upgraded builder = Settings.builder(); - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE); + builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE); client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get(); backwardsCluster().upgradeAllNodes(); builder = Settings.builder(); // disable rebalancing entirely for the time
being otherwise we might get relocations / rebalance from nodes with old segments - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE); - builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.ALL); + builder.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE); + builder.put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.ALL); client().admin().cluster().prepareUpdateSettings().setTransientSettings(builder).get(); ensureGreen(); logger.info("--> Nodes upgrade complete"); diff --git a/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java new file mode 100644 index 00000000000..3c38e2ef0fa --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -0,0 +1,164 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; + +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class BulkProcessorRetryIT extends ESIntegTestCase { + private static final String INDEX_NAME = "test"; + private static final String TYPE_NAME = "type"; + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + //Have very low pool and queue sizes to overwhelm internal pools easily + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put("threadpool.generic.size", 1) + .put("threadpool.generic.queue_size", 1) + // don't mess with this one! 
It's quite sensitive to a low queue size + // (see also ThreadedActionListener which is happily spawning threads even when we already got rejected) + //.put("threadpool.listener.queue_size", 1) + .put("threadpool.get.queue_size", 1) + // default is 50 + .put("threadpool.bulk.queue_size", 20) + .build(); + } + + + public void testBulkRejectionLoadWithoutBackoff() throws Throwable { + boolean rejectedExecutionExpected = true; + executeBulkRejectionLoad(BackoffPolicy.noBackoff(), rejectedExecutionExpected); + } + + public void testBulkRejectionLoadWithBackoff() throws Throwable { + boolean rejectedExecutionExpected = false; + executeBulkRejectionLoad(BackoffPolicy.exponentialBackoff(), rejectedExecutionExpected); + } + + private void executeBulkRejectionLoad(BackoffPolicy backoffPolicy, boolean rejectedExecutionExpected) throws Throwable { + int numberOfAsyncOps = randomIntBetween(600, 700); + final CountDownLatch latch = new CountDownLatch(numberOfAsyncOps); + final Set responses = Collections.newSetFromMap(new ConcurrentHashMap<>()); + + assertAcked(prepareCreate(INDEX_NAME)); + ensureGreen(); + + BulkProcessor bulkProcessor = BulkProcessor.builder(client(), new BulkProcessor.Listener() { + @Override + public void beforeBulk(long executionId, BulkRequest request) { + // no op + } + + @Override + public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { + responses.add(response); + latch.countDown(); + } + + @Override + public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + responses.add(failure); + latch.countDown(); + } + }).setBulkActions(1) + // zero means that we're in the sync case, more means that we're in the async case + .setConcurrentRequests(randomIntBetween(0, 100)) + .setBackoffPolicy(backoffPolicy) + .build(); + indexDocs(bulkProcessor, numberOfAsyncOps); + latch.await(10, TimeUnit.SECONDS); + bulkProcessor.close(); + + assertThat(responses.size(), equalTo(numberOfAsyncOps)); + + // validate all responses + for (Object response : responses) { + if (response instanceof BulkResponse) { + BulkResponse bulkResponse = (BulkResponse) response; + for (BulkItemResponse bulkItemResponse : bulkResponse.getItems()) { + if (bulkItemResponse.isFailed()) { + BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); + Throwable rootCause = ExceptionsHelper.unwrapCause(failure.getCause()); + if (rootCause instanceof EsRejectedExecutionException) { + if (rejectedExecutionExpected == false) { + // we're not expecting that we overwhelmed it even once + throw new AssertionError("Unexpected failure reason", rootCause); + } + } else { + throw new AssertionError("Unexpected failure", rootCause); + } + } + } + } else { + Throwable t = (Throwable) response; + // we're not expecting any other errors + throw new AssertionError("Unexpected failure", t); + } + } + + client().admin().indices().refresh(new RefreshRequest()).get(); + + // validate we did not create any duplicates due to retries + Matcher searchResultCount; + if (rejectedExecutionExpected) { + // it is ok if we lost some index operations to rejected executions + searchResultCount = lessThanOrEqualTo((long) numberOfAsyncOps); + } else { + searchResultCount = equalTo((long) numberOfAsyncOps); + } + + SearchResponse results = client() + .prepareSearch(INDEX_NAME) + .setTypes(TYPE_NAME) + .setQuery(QueryBuilders.matchAllQuery()) + .setSize(0) + .get(); + assertThat(results.getHits().totalHits(), searchResultCount); + } + + private static void indexDocs(BulkProcessor processor, int 
numDocs) { + for (int i = 1; i <= numDocs; i++) { + processor.add(client() + .prepareIndex() + .setIndex(INDEX_NAME) + .setType(TYPE_NAME) + .setId(Integer.toString(i)) + .setSource("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .request()); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java new file mode 100644 index 00000000000..4d73f932d2f --- /dev/null +++ b/core/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -0,0 +1,220 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.bulk; + +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.delete.DeleteResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; +import org.elasticsearch.rest.NoOpClient; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.hamcrest.Matchers.*; + +public class RetryTests extends ESTestCase { + // no need to wait for a long time in tests + private static final TimeValue DELAY = TimeValue.timeValueMillis(1L); + private static final int CALLS_TO_FAIL = 5; + + private MockBulkClient bulkClient; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + this.bulkClient = new MockBulkClient(getTestName(), CALLS_TO_FAIL); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + this.bulkClient.close(); + } + + private BulkRequest createBulkRequest() { + BulkRequest request = new BulkRequest(); + request.add(new UpdateRequest("shop", "products", "1")); + request.add(new UpdateRequest("shop", "products", "2")); + request.add(new UpdateRequest("shop", "products", "3")); + request.add(new UpdateRequest("shop", "products", "4")); + request.add(new UpdateRequest("shop", "products", "5")); + return request; + } + + public void testSyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + .policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertFalse(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testSyncRetryFailsAfterBackoff() throws Exception { + BackoffPolicy
backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + + BulkRequest bulkRequest = createBulkRequest(); + BulkResponse response = Retry + .on(EsRejectedExecutionException.class) + .policy(backoff) + .withSyncBackoff(bulkClient, bulkRequest); + + assertTrue(response.hasFailures()); + assertThat(response.getItems().length, equalTo(bulkRequest.numberOfActions())); + } + + public void testAsyncRetryBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + listener.assertOnResponseCalled(); + listener.assertResponseWithoutFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + public void testAsyncRetryFailsAfterBacksOff() throws Exception { + BackoffPolicy backoff = BackoffPolicy.constantBackoff(DELAY, CALLS_TO_FAIL - 1); + AssertingListener listener = new AssertingListener(); + + BulkRequest bulkRequest = createBulkRequest(); + Retry.on(EsRejectedExecutionException.class) + .policy(backoff) + .withAsyncBackoff(bulkClient, bulkRequest, listener); + + listener.awaitCallbacksCalled(); + + listener.assertOnResponseCalled(); + listener.assertResponseWithFailures(); + listener.assertResponseWithNumberOfItems(bulkRequest.numberOfActions()); + listener.assertOnFailureNeverCalled(); + } + + private static class AssertingListener implements ActionListener { + private final CountDownLatch latch; + private final AtomicInteger countOnResponseCalled = new AtomicInteger(); + private volatile Throwable lastFailure; + private volatile BulkResponse response; + + private AssertingListener() { + latch = new CountDownLatch(1); + } + + public void awaitCallbacksCalled() throws InterruptedException { + latch.await(); + } + + @Override + public void onResponse(BulkResponse bulkItemResponses) { + this.response = bulkItemResponses; + countOnResponseCalled.incrementAndGet(); + latch.countDown(); + } + + @Override + public void onFailure(Throwable e) { + this.lastFailure = e; + latch.countDown(); + } + + public void assertOnResponseCalled() { + assertThat(countOnResponseCalled.get(), equalTo(1)); + } + + public void assertResponseWithNumberOfItems(int numItems) { + assertThat(response.getItems().length, equalTo(numItems)); + } + + public void assertResponseWithoutFailures() { + assertThat(response, notNullValue()); + assertFalse("Response should not have failures", response.hasFailures()); + } + + public void assertResponseWithFailures() { + assertThat(response, notNullValue()); + assertTrue("Response should have failures", response.hasFailures()); + } + + public void assertOnFailureNeverCalled() { + assertThat(lastFailure, nullValue()); + } + } + + private static class MockBulkClient extends NoOpClient { + private int numberOfCallsToFail; + + private MockBulkClient(String testName, int numberOfCallsToFail) { + super(testName); + this.numberOfCallsToFail = numberOfCallsToFail; + } + + @Override + public ActionFuture bulk(BulkRequest request) { + PlainActionFuture responseFuture = new PlainActionFuture<>(); + bulk(request, responseFuture); + return responseFuture; + } + + @Override + public void bulk(BulkRequest request, ActionListener listener) { + // do everything synchronously, that's fine for a test + boolean 
shouldFail = numberOfCallsToFail > 0; + numberOfCallsToFail--; + + BulkItemResponse[] itemResponses = new BulkItemResponse[request.requests().size()]; + // if we have to fail, we need to fail at least once "reliably", the rest can be random + int itemToFail = randomInt(request.requests().size() - 1); + for (int idx = 0; idx < request.requests().size(); idx++) { + if (shouldFail && (randomBoolean() || idx == itemToFail)) { + itemResponses[idx] = failedResponse(); + } else { + itemResponses[idx] = successfulResponse(); + } + } + listener.onResponse(new BulkResponse(itemResponses, 1000L)); + } + + private BulkItemResponse successfulResponse() { + return new BulkItemResponse(1, "update", new DeleteResponse()); + } + + private BulkItemResponse failedResponse() { + return new BulkItemResponse(1, "update", new BulkItemResponse.Failure("test", "test", "1", new EsRejectedExecutionException("pool full"))); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java index 825e3e40894..b0c13f851a6 100644 --- a/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java +++ b/core/src/test/java/org/elasticsearch/action/support/master/IndexingMasterFailoverIT.java @@ -1,5 +1,24 @@ package org.elasticsearch.action.support.master; +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -45,8 +64,8 @@ public class IndexingMasterFailoverIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long enough to induce failures but not too long so the test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) .build(); internalCluster().startMasterOnlyNodesAsync(3, sharedSettings).get(); diff --git a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java index 3a3876b60b1..1cbe05da6a0 100644 --- a/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/core/src/test/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -865,7 +865,7 @@ public class IndexAliasesIT extends ESIntegTestCase { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .addAlias(new Alias("alias1")) - .addAlias(new Alias("alias2").filter(QueryBuilders.missingQuery("field"))) + .addAlias(new Alias("alias2").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field")))) .addAlias(new Alias("alias3").indexRouting("index").searchRouting("search"))); checkAliases(); diff --git a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java index fbc6cc58067..95735b8648f 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityIT.java @@ -71,7 +71,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.constantScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.existsQuery; -import static org.elasticsearch.index.query.QueryBuilders.missingQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -440,25 +439,9 @@ public class BasicBackwardsCompatibilityIT extends ESBackcompatTestCase { countResponse = client().prepareSearch().setSize(0).setQuery(existsQuery("obj1")).get(); assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("field1")).get(); - assertHitCount(countResponse, 2l); - - countResponse = client().prepareSearch().setSize(0).setQuery(constantScoreQuery(missingQuery("field1"))).get(); - assertHitCount(countResponse, 2l); - countResponse = client().prepareSearch().setSize(0).setQuery(queryStringQuery("_missing_:field1")).get();
assertHitCount(countResponse, 2l); - // wildcard check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("x*")).get(); - assertHitCount(countResponse, 2l); - - // object check - countResponse = client().prepareSearch().setSize(0).setQuery(missingQuery("obj1")).get(); - assertHitCount(countResponse, 2l); if (!backwardsCluster().upgradeOneNode()) { break; } diff --git a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java index 137c6c5b2c2..bf71c0c2467 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityIT.java @@ -32,8 +32,6 @@ import org.elasticsearch.action.admin.indices.upgrade.UpgradeIT; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; @@ -62,23 +60,12 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; -import java.nio.file.DirectoryStream; -import java.nio.file.FileVisitResult; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.SimpleFileVisitor; +import java.nio.file.*; import java.nio.file.attribute.BasicFileAttributes; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.greaterThanOrEqualTo; // needs at least 2 nodes since it bumps replicas to 1 @@ -120,7 +107,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public Settings nodeSettings(int ord) { return Settings.builder() .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) // disable merging so no segments will be upgraded - .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 30) // increase recovery speed for small files + .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 30) // increase recovery speed for small files .build(); } @@ -271,7 +258,7 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { public void testOldIndexes() throws Exception { setupCluster(); - Collections.shuffle(indexes, getRandom()); + Collections.shuffle(indexes, random()); for (String index : indexes) { long startTime = System.currentTimeMillis(); logger.info("--> Testing old index " + index); @@ -341,13 +328,6 @@ public class OldIndexBackwardsCompatibilityIT extends ESIntegTestCase { searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); assertEquals(numDocs, searchRsp.getHits().getTotalHits()); - - logger.info("--> testing missing filter"); - // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache - searchReq = 
client().prepareSearch(indexName).setQuery(QueryBuilders.missingQuery("long_sort")); - searchRsp = searchReq.get(); - ElasticsearchAssertions.assertNoFailures(searchRsp); - assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java index 228f1a65121..6ad05b3ff84 100644 --- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java +++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java @@ -181,7 +181,7 @@ public class RestoreBackwardsCompatIT extends AbstractSnapshotIntegTestCase { logger.info("--> check settings"); ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); - assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP + "version_attr"), equalTo(version)); + assertThat(clusterState.metaData().persistentSettings().get(FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getKey() + "version_attr"), equalTo(version)); logger.info("--> check templates"); IndexTemplateMetaData template = clusterState.getMetaData().templates().get("template_" + version.toLowerCase(Locale.ROOT)); diff --git a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java index f4b29768b91..f452bb5c36c 100644 --- a/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java +++ b/core/src/test/java/org/elasticsearch/client/transport/TransportClientHeadersTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; @@ -114,12 +114,12 @@ public class TransportClientHeadersTests extends AbstractClientHeadersTestCase { public String description() { return "a mock transport service"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("internal", InternalTransportService.class); + public void onModule(NetworkModule transportModule) { + transportModule.registerTransportService("internal", InternalTransportService.class); } @Override public Settings additionalSettings() { - return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); + return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "internal").build(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 
5ed45620a03..8aa065548df 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.store.Store; @@ -126,7 +127,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() // manual collection or upon cluster forming. - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT, "1s") + .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s") .build(); } @@ -137,9 +138,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { } public void testClusterInfoServiceCollectsInformation() throws Exception { - internalCluster().startNodesAsync(2, - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "200ms").build()) - .get(); + internalCluster().startNodesAsync(2).get(); assertAcked(prepareCreate("test").setSettings(settingsBuilder() .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL, 0) .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE).build())); @@ -147,6 +146,8 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); + infoService.onMaster(); ClusterInfo info = infoService.refresh(); assertNotNull("info should not be null", info); ImmutableOpenMap leastUsages = info.getNodeLeastAvailableDiskUsages(); @@ -188,7 +189,7 @@ public class ClusterInfoServiceIT extends ESIntegTestCase { public void testClusterInfoServiceInformationClearOnError() throws InterruptedException, ExecutionException { internalCluster().startNodesAsync(2, // manually control publishing - Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "60m").build()) + Settings.builder().put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.getKey(), "60m").build()) .get(); prepareCreate("test").setSettings(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1).get(); ensureGreen("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 4ca0fffbdfc..48b2591559e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -31,11 +31,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllo import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; -import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import 
org.elasticsearch.cluster.settings.DynamicSettings; import org.elasticsearch.cluster.settings.Validator; import org.elasticsearch.common.inject.ModuleTestCase; -import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.*; import org.elasticsearch.index.settings.IndexDynamicSettings; public class ClusterModuleTests extends ModuleTestCase { @@ -73,18 +72,20 @@ public class ClusterModuleTests extends ModuleTestCase { } public void testRegisterClusterDynamicSettingDuplicate() { - ClusterModule module = new ClusterModule(Settings.EMPTY); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); try { - module.registerClusterDynamicSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, Validator.EMPTY); + module.registerSetting(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING); } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE + "] twice"); + assertEquals(e.getMessage(), "Cannot register setting [" + EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey() + "] twice"); } } public void testRegisterClusterDynamicSetting() { - ClusterModule module = new ClusterModule(Settings.EMPTY); - module.registerClusterDynamicSetting("foo.bar", Validator.EMPTY); - assertInstanceBindingWithAnnotation(module, DynamicSettings.class, dynamicSettings -> dynamicSettings.hasDynamicSetting("foo.bar"), ClusterDynamicSettings.class); + final SettingsFilter settingsFilter = new SettingsFilter(Settings.EMPTY); + SettingsModule module = new SettingsModule(Settings.EMPTY, settingsFilter); + module.registerSetting(Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER)); + assertInstanceBinding(module, ClusterSettings.class, service -> service.hasDynamicSetting("foo.bar")); } public void testRegisterIndexDynamicSettingDuplicate() { diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java index 9e842a38722..9d453ead4de 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterServiceIT.java @@ -43,17 +43,29 @@ import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; /** * @@ -751,23 +763,40 @@ public 
class ClusterServiceIT extends ESIntegTestCase { } } + int numberOfThreads = randomIntBetween(2, 8); + int tasksSubmittedPerThread = randomIntBetween(1, 1024); + int numberOfExecutors = Math.max(1, numberOfThreads / 4); + final Semaphore semaphore = new Semaphore(numberOfExecutors); + class TaskExecutor implements ClusterStateTaskExecutor { private AtomicInteger counter = new AtomicInteger(); + private AtomicInteger batches = new AtomicInteger(); + private AtomicInteger published = new AtomicInteger(); @Override public BatchResult execute(ClusterState currentState, List tasks) throws Exception { tasks.forEach(task -> task.execute()); counter.addAndGet(tasks.size()); - return BatchResult.builder().successes(tasks).build(currentState); + ClusterState maybeUpdatedClusterState = currentState; + if (randomBoolean()) { + maybeUpdatedClusterState = ClusterState.builder(currentState).build(); + batches.incrementAndGet(); + semaphore.acquire(); + } + return BatchResult.builder().successes(tasks).build(maybeUpdatedClusterState); } @Override public boolean runOnlyOnMaster() { return false; } + + @Override + public void clusterStatePublished(ClusterState newClusterState) { + published.incrementAndGet(); + semaphore.release(); + } } - int numberOfThreads = randomIntBetween(2, 8); - int tasksSubmittedPerThread = randomIntBetween(1, 1024); ConcurrentMap counters = new ConcurrentHashMap<>(); CountDownLatch updateLatch = new CountDownLatch(numberOfThreads * tasksSubmittedPerThread); @@ -784,7 +813,6 @@ public class ClusterServiceIT extends ESIntegTestCase { } }; - int numberOfExecutors = Math.max(1, numberOfThreads / 4); List executors = new ArrayList<>(); for (int i = 0; i < numberOfExecutors; i++) { executors.add(new TaskExecutor()); @@ -830,6 +858,8 @@ public class ClusterServiceIT extends ESIntegTestCase { // wait until all the cluster state updates have been processed updateLatch.await(); + // and until all of the publication callbacks have completed + semaphore.acquire(numberOfExecutors); // assert the number of executed tasks is correct assertEquals(numberOfThreads * tasksSubmittedPerThread, counter.get()); @@ -838,6 +868,7 @@ public class ClusterServiceIT extends ESIntegTestCase { for (TaskExecutor executor : executors) { if (counts.containsKey(executor)) { assertEquals((int) counts.get(executor), executor.counter.get()); + assertEquals(executor.batches.get(), executor.published.get()); } } @@ -940,7 +971,7 @@ public class ClusterServiceIT extends ESIntegTestCase { public void testLongClusterStateUpdateLogging() throws Exception { Settings settings = settingsBuilder() .put("discovery.type", "local") - .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s") + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10s") .build(); internalCluster().startNode(settings); ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); @@ -976,7 +1007,7 @@ public class ClusterServiceIT extends ESIntegTestCase { processedFirstTask.await(1, TimeUnit.SECONDS); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms"))); + .put(InternalClusterService.CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.getKey(), "10ms"))); clusterService1.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override diff --git 
a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 648356be173..2f1e5d33f7e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -280,7 +280,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { assertNoMasterBlockOnAllNodes(); logger.info("--> bringing another node up"); - internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2).build()); + internalCluster().startNode(settingsBuilder().put(settings).put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()); clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").get(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); } @@ -317,7 +317,7 @@ public class MinimumMasterNodesIT extends ESIntegTestCase { // set an initial value which is at least quorum to avoid split brains during initial startup int initialMinMasterNodes = randomIntBetween(nodeCount / 2 + 1, nodeCount); - settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, initialMinMasterNodes); + settings.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), initialMinMasterNodes); logger.info("--> starting [{}] nodes. min_master_nodes set to [{}]", nodeCount, initialMinMasterNodes); @@ -328,19 +328,21 @@ int updateCount = randomIntBetween(1, nodeCount); - logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); + logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount))); + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); logger.info("--> verifying no node left and master is up"); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); updateCount = nodeCount + randomIntBetween(1, 2000); - logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount); - assertThat(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, updateCount)) - .get().getPersistentSettings().getAsMap().keySet(), - empty()); + logger.info("--> trying to update [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + try { + client().admin().cluster().prepareUpdateSettings() + .setPersistentSettings(settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)).get(); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" + updateCount + "]"); + } logger.info("--> verifying no node left and master is up"); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); @@ -351,8 +353,8 @@
public class MinimumMasterNodesIT extends ESIntegTestCase { .put("discovery.type", "zen") .put(FaultDetection.SETTING_PING_TIMEOUT, "1h") // disable it .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(DiscoverySettings.COMMIT_TIMEOUT, "100ms") // speed things up + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "100ms") // speed things up .build(); internalCluster().startNodesAsync(3, settings).get(); ensureGreen(); // ensure cluster state is recovered before we disrupt things diff --git a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index e0f8b2cb840..8e5479d6f84 100644 --- a/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -67,7 +67,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "all") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all") .build(); TimeValue timeout = TimeValue.timeValueMillis(200); @@ -219,7 +219,7 @@ public class NoMasterNodeIT extends ESIntegTestCase { .put("discovery.zen.minimum_master_nodes", 2) .put(ZenDiscovery.SETTING_PING_TIMEOUT, "200ms") .put("discovery.initial_state_timeout", "500ms") - .put(DiscoverySettings.NO_MASTER_BLOCK, "write") + .put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "write") .build(); internalCluster().startNode(settings); diff --git a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index 81de8b1a43c..c5e48a97dfd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -50,8 +50,8 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { .put(super.nodeSettings(nodeOrdinal)) //make sure that enough concurrent reroutes can happen at the same time //we have a minimum of 2 nodes, and a maximum of 10 shards, thus 5 should be enough - .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5) - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5) + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 10) .build(); } @@ -69,7 +69,7 @@ public class AckClusterUpdateSettingsIT extends ESIntegTestCase { private void removePublishTimeout() { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); } public void testClusterUpdateSettingsAcknowledgement() { diff --git 
a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index 47517a753af..7d3825a14b8 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -67,8 +67,8 @@ public class AckIT extends ESIntegTestCase { //to test that the acknowledgement mechanism is working we better disable the wait for publish //otherwise the operation is most likely acknowledged even if it doesn't support ack return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(DiscoverySettings.PUBLISH_TIMEOUT, 0).build(); - } + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), 0).build(); + } public void testUpdateSettingsAcknowledgement() { createIndex("test"); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index f9151628b8a..726590104f1 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -106,9 +106,9 @@ public class AwarenessAllocationIT extends ESIntegTestCase { public void testAwarenessZones() throws Exception { Settings commonSettings = Settings.settingsBuilder() - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP + "zone.values", "a,b") - .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTES, "zone") - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 3) + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b") + .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone") + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 3) .put(ZenDiscovery.SETTING_JOIN_TIMEOUT, "10s") .build(); diff --git a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index 1605e70637e..b85c17097f2 100644 --- a/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -56,7 +56,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_ME import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_READ; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_BLOCKS_WRITE; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_READ_ONLY; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; @@ -71,15 +71,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { public void testRerouteWithCommands_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") -
.put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithCommands(commonSettings); } public void testRerouteWithCommands_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithCommands(commonSettings); } @@ -147,15 +147,15 @@ public class ClusterRerouteIT extends ESIntegTestCase { public void testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none") - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") .build(); rerouteWithAllocateLocalGateway(commonSettings); } public void testRerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); rerouteWithAllocateLocalGateway(commonSettings); } @@ -279,7 +279,7 @@ public class ClusterRerouteIT extends ESIntegTestCase { logger.info("--> disable allocation"); Settings newSettings = settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build(); client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java index ef548140192..9a91e1cd562 100644 --- a/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -29,11 +29,7 @@ import org.junit.BeforeClass; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; @@ -246,7 +242,7 @@ public class DiscoveryNodeFiltersTests extends ESTestCase { private Settings shuffleSettings(Settings source) { Settings.Builder settings = Settings.settingsBuilder(); List keys = new ArrayList<>(source.getAsMap().keySet()); - Collections.shuffle(keys, getRandom()); + Collections.shuffle(keys, random()); for (String o : keys) { settings.put(o, source.getAsMap().get(o)); } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index c236ea54878..2d704380ae0 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -170,7 +170,7 @@ 
public class DelayedAllocationIT extends ESIntegTestCase { private String findNodeWithShard() { ClusterState state = client().admin().cluster().prepareState().get().getState(); List startedShards = state.routingTable().shardsWithState(ShardRoutingState.STARTED); - Collections.shuffle(startedShards, getRandom()); + Collections.shuffle(startedShards, random()); return state.nodes().get(startedShards.get(0).currentNodeId()).getName(); } } diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java new file mode 100644 index 00000000000..340fdcc3c99 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -0,0 +1,122 @@ +package org.elasticsearch.cluster.routing; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.disruption.NetworkDisconnectPartition; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +@ESIntegTestCase.SuppressLocalMode +public class PrimaryAllocationIT extends ESIntegTestCase { + + public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { + logger.info("--> starting 3 nodes, 1 master, 2 data"); + String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); + internalCluster().startDataOnlyNodesAsync(2).get(); + + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + ensureGreen(); + logger.info("--> indexing..."); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + refresh(); + + ClusterState state = client().admin().cluster().prepareState().all().get().getState(); + List shards = state.routingTable().allShards("test"); + assertThat(shards.size(), equalTo(2)); + + final String primaryNode; + final String replicaNode; + if (shards.get(0).primary()) { + primaryNode =
+                    state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
+            replicaNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
+        } else {
+            primaryNode = state.getRoutingNodes().node(shards.get(1).currentNodeId()).node().name();
+            replicaNode = state.getRoutingNodes().node(shards.get(0).currentNodeId()).node().name();
+        }
+
+        NetworkDisconnectPartition partition = new NetworkDisconnectPartition(
+                new HashSet<>(Arrays.asList(master, replicaNode)), Collections.singleton(primaryNode), random());
+        internalCluster().setDisruptionScheme(partition);
+        logger.info("--> partitioning node with primary shard from rest of cluster");
+        partition.startDisrupting();
+
+        ensureStableCluster(2, master);
+
+        logger.info("--> index a document into previous replica shard (that is now primary)");
+        client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+
+        logger.info("--> shut down node that has new acknowledged document");
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));
+
+        ensureStableCluster(1, master);
+
+        partition.stopDisrupting();
+
+        logger.info("--> waiting for node with old primary shard to rejoin the cluster");
+        ensureStableCluster(2, master);
+
+        logger.info("--> check that old primary shard does not get promoted to primary again");
+        // kick reroute and wait for all shard states to be fetched
+        client(master).admin().cluster().prepareReroute().get();
+        assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0)));
+        // kick reroute a second time and check that all shards are unassigned
+        assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2));
+
+        logger.info("--> starting node that reuses data folder with the up-to-date primary shard");
+        internalCluster().startDataOnlyNode(Settings.EMPTY);
+
+        logger.info("--> check that the up-to-date primary shard gets promoted and that documents are available");
+        ensureYellow("test");
+        assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2L);
+    }
+
+    public void testNotWaitForQuorumCopies() throws Exception {
+        logger.info("--> starting 3 nodes");
+        internalCluster().startNodesAsync(3).get();
+        logger.info("--> creating index with 1 primary and 2 replicas");
+        assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+                .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get());
+        ensureGreen("test");
+        client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get();
+        logger.info("--> removing 2 nodes from cluster");
+        internalCluster().stopRandomDataNode();
+        internalCluster().stopRandomDataNode();
+        internalCluster().fullRestart();
+        logger.info("--> checking that index still gets allocated with only 1 shard copy being available");
+        ensureYellow("test");
+        assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 1L);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
index 7a7f4722e97..2e54512b95f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ActiveAllocationIdTests.java
@@ -1,5 +1,24 @@
package org.elasticsearch.cluster.routing.allocation;
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
index d10912e69db..91ba1f4999c 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AddIncrementallyTests.java
@@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
-
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -51,7 +50,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {

    public void testAddNodesAndIndices() {
        Settings.Builder settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
        AllocationService service = createAllocationService(settings.build());

        ClusterState clusterState = initCluster(service, 1, 3, 3, 1);
@@ -94,7 +93,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {

    public void testMinimalRelocations() {
        Settings.Builder settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
                .put("cluster.routing.allocation.node_concurrent_recoveries", 2);
        AllocationService service = createAllocationService(settings.build());
@@ -162,7 +161,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {

    public void testMinimalRelocationsNoLimit() {
        Settings.Builder settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString())
                .put("cluster.routing.allocation.node_concurrent_recoveries", 100)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100);
        AllocationService service = createAllocationService(settings.build());
@@ -388,7 +387,7 @@ public class AddIncrementallyTests extends ESAllocationTestCase {
        logger.info("Removing [{}] nodes", numNodes);
        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
        ArrayList<DiscoveryNode> discoveryNodes = CollectionUtils.iterableAsArrayList(clusterState.nodes());
-        Collections.shuffle(discoveryNodes, getRandom());
+        Collections.shuffle(discoveryNodes, random());
        for (DiscoveryNode node : discoveryNodes) {
            nodes.remove(node.id());
            numNodes--;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
index 6ac2b7df9ca..1cf5ba0083d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java
@@ -98,8 +98,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase {

    public void testAllocateCommand() {
        AllocationService allocation = createAllocationService(settingsBuilder()
-                .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")
-                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none")
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
                .build());

        logger.info("--> building initial routing table");
@@ -186,8 +186,8 @@ public class AllocationCommandsTests extends ESAllocationTestCase {

    public void testCancelCommand() {
        AllocationService allocation = createAllocationService(settingsBuilder()
-                .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")
-                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, "none")
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")
+                .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none")
                .build());

        logger.info("--> building initial routing table");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
index d7a049d1b92..8d510e7f0c5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationPriorityTests.java
@@ -39,8 +39,8 @@ public class AllocationPriorityTests extends ESAllocationTestCase {
    public void testPrioritizedIndicesAllocatedFirst() {
        AllocationService allocation = createAllocationService(settingsBuilder().
                put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 1)
-                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 1)
-                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 1).build());
+                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1)
+                .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 1).build());
        final String highPriorityName;
        final String lowPriorityName;
        final int priorityFirst;
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
index 7be6037cf79..e9d0f75b1c1 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/AwarenessAllocationTests.java
@@ -55,7 +55,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testMoveShardOnceNewNodeWithAttributeAdded1() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -123,7 +123,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testMoveShardOnceNewNodeWithAttributeAdded2() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -193,7 +193,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .put("cluster.routing.allocation.balance.index", 0.0f)
@@ -293,7 +293,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());
@@ -387,7 +387,7 @@
public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testMoveShardOnceNewNodeWithAttributeAdded5() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -465,7 +465,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testMoveShardOnceNewNodeWithAttributeAdded6() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -545,7 +545,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testFullAwareness1() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -612,7 +612,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testFullAwareness2() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
                .build());

@@ -681,7 +681,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .put("cluster.routing.allocation.awareness.force.rack_id.values", "1,2")
                .put("cluster.routing.allocation.awareness.attributes", "rack_id")
@@ -767,7 +767,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
                .put("cluster.routing.allocation.awareness.attributes", "zone")
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());

@@ -828,7 +828,7 @@ public class AwarenessAllocationTests extends ESAllocationTestCase {
    public void testUnassignedShardsWithUnbalancedZones() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.awareness.attributes", "zone")
                .build());
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
index 1092b2ede19..e622036e13b 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/BalanceConfigurationTests.java
@@ -37,10 +37,10 @@ import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.node.settings.NodeSettingsService;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.gateway.NoopGatewayAllocator;
import org.hamcrest.Matchers;
@@ -65,10 +65,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
        final float balanceTreshold = 1.0f;

        Settings.Builder settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
-        settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
-        settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
-        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold);
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance);
+        settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance);
+        settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold);

        AllocationService strategy = createAllocationService(settings.build());
@@ -90,10 +90,10 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
        final float balanceTreshold = 1.0f;

        Settings.Builder settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
-        settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, indexBalance);
-        settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, replicaBalance);
-        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, balanceTreshold);
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), indexBalance);
+        settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), replicaBalance);
+        settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), balanceTreshold);

        AllocationService strategy = createAllocationService(settings.build());
@@ -279,36 +279,30 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
    public void testPersistedSettings() {
        Settings.Builder settings = settingsBuilder();
-        settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.2);
-        settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.3);
-        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 2.0);
-        final NodeSettingsService.Listener[] listeners = new NodeSettingsService.Listener[1];
-        NodeSettingsService service = new NodeSettingsService(settingsBuilder().build()) {
-
-            @Override
-            public void addListener(Listener listener) {
-                assertNull("addListener was called twice while only one time was expected", listeners[0]);
-                listeners[0] = listener;
-            }
-
-        };
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2);
+        settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3);
+        settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0);
+        ClusterSettings service = new ClusterSettings(settingsBuilder().build(), ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        BalancedShardsAllocator allocator = new BalancedShardsAllocator(settings.build(), service);
        assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
        assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
        assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));

        settings = settingsBuilder();
-        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
-        listeners[0].onRefreshSettings(settings.build());
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.2);
+        settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.3);
+        settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 2.0);
+        settings.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString());
+        service.applySettings(settings.build());
        assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.2f));
        assertThat(allocator.getShardBalance(), Matchers.equalTo(0.3f));
        assertThat(allocator.getThreshold(), Matchers.equalTo(2.0f));

        settings = settingsBuilder();
-        settings.put(BalancedShardsAllocator.SETTING_INDEX_BALANCE_FACTOR, 0.5);
-        settings.put(BalancedShardsAllocator.SETTING_SHARD_BALANCE_FACTOR, 0.1);
-        settings.put(BalancedShardsAllocator.SETTING_THRESHOLD, 3.0);
-        listeners[0].onRefreshSettings(settings.build());
+        settings.put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5);
+        settings.put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 0.1);
+        settings.put(BalancedShardsAllocator.THRESHOLD_SETTING.getKey(), 3.0);
+        service.applySettings(settings.build());
        assertThat(allocator.getIndexBalance(), Matchers.equalTo(0.5f));
        assertThat(allocator.getShardBalance(), Matchers.equalTo(0.1f));
        assertThat(allocator.getThreshold(), Matchers.equalTo(3.0f));
@@ -317,7 +311,7 @@ public class BalanceConfigurationTests extends ESAllocationTestCase {
    public void testNoRebalanceOnPrimaryOverload() {
        Settings.Builder settings = settingsBuilder();
        AllocationService strategy = new AllocationService(settings.build(), randomAllocationDeciders(settings.build(),
-                new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()), new ShardsAllocators(settings.build(),
+                new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()), new ShardsAllocators(settings.build(),
                NoopGatewayAllocator.INSTANCE, new ShardsAllocator() {

            @Override
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
index 8dad41db2f8..15a6ea0a5f4 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -46,7 +46,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
    private final ESLogger logger = Loggers.getLogger(ClusterRebalanceRoutingTests.class);

    public void testAlways() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -132,7 +132,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {

    public void testClusterPrimariesActive1() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -236,7 +236,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
    }

    public void testClusterPrimariesActive2() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_PRIMARIES_ACTIVE.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -320,7 +320,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
    }

    public void testClusterAllActive1() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -443,7 +443,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
    }

    public void testClusterAllActive2() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -527,7 +527,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
    }

    public void testClusterAllActive3() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.INDICES_ALL_ACTIVE.toString()).build());

        MetaData metaData = MetaData.builder()
@@ -737,7 +737,7 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {

    public void testRebalanceWhileShardFetching() {
        final AtomicBoolean hasFetches = new AtomicBoolean(true);
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new NoopGatewayAllocator() {
            @Override
            public boolean allocateUnassigned(RoutingAllocation allocation) {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
index e16e7cc2cec..d807dc1b5ca 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/DeadNodesAllocationTests.java
@@ -46,7 +46,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
    public void testSimpleDeadNodeOnStartedPrimaryShard() {
        AllocationService allocation = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("--> building initial routing table");
@@ -97,7 +97,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
    public void testDeadNodeWhileRelocatingOnToNode() {
        AllocationService allocation = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("--> building initial routing table");
@@ -171,7 +171,7 @@ public class DeadNodesAllocationTests extends ESAllocationTestCase {
    public void testDeadNodeWhileRelocatingOnFromNode() {
        AllocationService allocation = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("--> building initial routing table");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
index 3b242d8676f..affab78521c 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedNodeRoutingTests.java
@@ -41,7 +41,7 @@ public class FailedNodeRoutingTests extends ESAllocationTestCase {
    private final ESLogger logger = Loggers.getLogger(FailedNodeRoutingTests.class);

    public void testSimpleFailedNodeTest() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());

        MetaData metaData = MetaData.builder()
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
index 9bfaf7e9997..8dffacaa379 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/FailedShardsRoutingTests.java
@@ -57,7 +57,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testFailedShardPrimaryRelocatingToAndFrom() {
        AllocationService allocation = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("--> building initial routing table");
@@ -145,7 +145,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testFailPrimaryStartedCheckReplicaElected() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("Building initial routing table");
@@ -226,7 +226,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testFirstAllocationFailureSingleNode() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("Building initial routing table");
@@ -282,7 +282,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testSingleShardMultipleAllocationFailures() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("Building initial routing table");
@@ -338,7 +338,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testFirstAllocationFailureTwoNodes() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("Building initial routing table");
@@ -398,7 +398,7 @@ public class FailedShardsRoutingTests extends ESAllocationTestCase {
    public void testRebalanceFailure() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .build());

        logger.info("Building initial routing table");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
index aa6fdef828a..d5f8134d95f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/IndexBalanceTests.java
@@ -48,7 +48,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
@@ -178,7 +178,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
@@ -340,7 +340,7 @@ public class IndexBalanceTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
index 62501cbf9fb..7262de2b291 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -39,15 +39,10 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

-import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED;
-import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
+import static org.elasticsearch.cluster.routing.ShardRoutingState.*;
import static org.elasticsearch.common.settings.Settings.settingsBuilder;
import static org.elasticsearch.test.VersionUtils.randomVersion;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.*;

/**
 *
@@ -58,7 +53,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
    public void testDoNotAllocateFromPrimary() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());

@@ -172,7 +167,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
    public void testRandom() {
        AllocationService service = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());

@@ -199,7 +194,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
            DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
            int numNodes = between(1, 20);
            if (nodes.size() > numNodes) {
-                Collections.shuffle(nodes, getRandom());
+                Collections.shuffle(nodes, random());
                nodes = nodes.subList(0, numNodes);
            } else {
                for (int j = nodes.size(); j < numNodes; j++) {
@@ -221,7 +216,7 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
    public void testRollingRestart() {
        AllocationService service = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build());
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
index fbc742573e9..18725a0de78 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RebalanceAfterActiveTests.java
@@ -57,7 +57,7 @@ public class RebalanceAfterActiveTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1)
                .build(),
                new ClusterInfoService() {
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
index eca2a227f8f..eec1b48be97 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/RoutingNodesIntegrityTests.java
@@ -47,7 +47,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
@@ -119,7 +119,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 10)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
@@ -211,7 +211,7 @@ public class RoutingNodesIntegrityTests extends ESAllocationTestCase {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.node_concurrent_recoveries", 1)
                .put("cluster.routing.allocation.node_initial_primaries_recoveries", 3)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
                .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1).build());

        logger.info("Building initial routing table");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
index f096ab0b13d..e1586c433a5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardVersioningTests.java
@@ -41,7 +41,7 @@ public class ShardVersioningTests extends ESAllocationTestCase {
    private final ESLogger logger = Loggers.getLogger(ShardVersioningTests.class);

    public void testSimple() {
-        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE,
+        AllocationService strategy = createAllocationService(settingsBuilder().put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
                ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build());

        MetaData metaData = MetaData.builder()
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
index 11d41a6a336..c0f0c0c2252 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsLimitAllocationTests.java
@@ -90,7 +90,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
    public void testClusterLevelShardsLimitAllocate() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 1)
+                .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 1)
                .build());

        logger.info("Building initial routing table");
@@ -126,7 +126,7 @@ public class ShardsLimitAllocationTests extends ESAllocationTestCase {
        // Bump the cluster total shards to 2
        strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE, 2)
+                .put(ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING.getKey(), 2)
                .build());

        logger.info("Do another reroute, make sure shards are now allocated");
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
index ed44b84a886..29ef451324d 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/SingleShardNoReplicasRoutingTests.java
@@ -211,7 +211,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase {
    public void testMultiIndexEvenDistribution() {
        AllocationService strategy = createAllocationService(settingsBuilder()
                .put("cluster.routing.allocation.concurrent_recoveries", 10)
-                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always")
+                .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always")
.put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); @@ -323,7 +323,7 @@ public class SingleShardNoReplicasRoutingTests extends ESAllocationTestCase { public void testMultiIndexUnevenNodes() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build()); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java index 671cce007c9..aec81a6e063 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/TenShardsOneReplicaRoutingTests.java @@ -50,7 +50,7 @@ public class TenShardsOneReplicaRoutingTests extends ESAllocationTestCase { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.node_concurrent_recoveries", 10) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .put("cluster.routing.allocation.balance.index", 0.0f) .put("cluster.routing.allocation.balance.replica", 1.0f) diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index a739f30856a..5377d09d4b5 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -60,9 +60,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThreshold() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "node1", "/dev/null", 100, 10)); // 90% used @@ -96,7 +96,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + 
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -170,9 +170,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.7).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.7).build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -181,7 +181,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -201,9 +201,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.5) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.6).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.5) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.6).build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -212,7 +212,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -254,9 +254,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithAbsoluteSizes() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "30b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "9b").build(); + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "30b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "9b").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 10)); // 90% used @@ -292,7 +292,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -349,7 +349,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { }; strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -405,9 +405,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 70 instead of 80 // node2 now should not have new shards allocated to it, but shards can remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "40b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "30b").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "40b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "30b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -416,7 +416,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -436,9 +436,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { // Set the high threshold to 60 instead of 70 // node2 now should not have new shards allocated to it, and shards cannot remain diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "50b") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "40b").build(); + 
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "50b") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "40b").build(); deciders = new AllocationDeciders(Settings.EMPTY, new HashSet<>(Arrays.asList( @@ -447,7 +447,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -522,9 +522,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testDiskThresholdWithShardSizes() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "71%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "71%").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 31)); // 69% used @@ -556,7 +556,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -589,9 +589,9 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testUnknownDiskUsage() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.85).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.85).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node2", new DiskUsage("node2", "node2", "/dev/null", 100, 50)); // 50% used @@ -624,7 +624,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - 
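The surrounding comments spell out the two-tier behavior under test: crossing the low watermark stops new shards from being allocated to a node, while crossing the high watermark additionally forces existing shards to move off ("shards can remain" versus "shards cannot remain"). A toy decision function capturing that contract, assuming percent-used inputs; the real decider also handles absolute byte values such as "30b":

// Toy model of the two-watermark contract exercised by the tests above.
class WatermarkModel {
    // Below the low watermark a node may still receive new shards.
    static boolean canAllocate(double percentUsed, double lowWatermark) {
        return percentUsed < lowWatermark;
    }

    // Existing shards may stay as long as the node is below the high watermark.
    static boolean canRemain(double percentUsed, double highWatermark) {
        return percentUsed < highWatermark;
    }

    public static void main(String[] args) {
        double used = 75.0;
        // low=70, high=80: no new shards, but existing shards can remain
        System.out.println(canAllocate(used, 70.0)); // false
        System.out.println(canRemain(used, 80.0));   // true
        // low=60, high=70: no new shards, and existing shards must move away
        System.out.println(canRemain(used, 70.0));   // false
    }
}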
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -688,10 +688,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testShardRelocationsTakenIntoAccount() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, 0.7) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, 0.8).build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), 0.7) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), 0.8).build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 40)); // 60% used @@ -727,7 +727,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); @@ -794,10 +794,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testCanRemainWithShardRelocatingAway() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); // We have an index with 2 primary shards each taking 40 bytes. 
Each node has 100 bytes available ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); @@ -889,7 +889,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { ))); AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); // Ensure that the reroute call doesn't alter the routing table, since the first primary is relocating away @@ -906,10 +906,10 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { public void testForSingleDataNode() { Settings diskSettings = settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, true) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "60%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%").build(); + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), true) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "60%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%").build(); ImmutableOpenMap.Builder usagesBuilder = ImmutableOpenMap.builder(); usagesBuilder.put("node1", new DiskUsage("node1", "n1", "/dev/null", 100, 100)); // 0% used @@ -989,7 +989,7 @@ public class DiskThresholdDeciderTests extends ESAllocationTestCase { AllocationService strategy = new AllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", -1) .build(), deciders, makeShardsAllocators(), cis); RoutingAllocation.Result result = strategy.reroute(clusterState, "reroute"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java index a386883ad1b..52e88ea3bc9 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderUnitTests.java @@ -28,12 +28,12 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.*; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; 
-import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.Arrays; @@ -45,7 +45,7 @@ import static org.hamcrest.CoreMatchers.equalTo; */ public class DiskThresholdDeciderUnitTests extends ESTestCase { public void testDynamicSettings() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -59,18 +59,15 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { assertTrue(decider.isEnabled()); assertTrue(decider.isIncludeRelocations()); - DiskThresholdDecider.ApplySettings applySettings = decider.newApplySettings(); - Settings newSettings = Settings.builder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS, false) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "70%") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "500mb") - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "30s") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING.getKey(), false) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "70%") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "500mb") + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "30s") .build(); - applySettings.onRefreshSettings(newSettings); - + nss.applySettings(newSettings); assertThat("high threshold bytes should be unset", decider.getFreeBytesThresholdHigh(), equalTo(ByteSizeValue.parseBytesSizeValue("0b", "test"))); assertThat("high threshold percentage should be changed", @@ -86,7 +83,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanAllocateUsesMaxAvailableSpace() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); @@ -127,7 +124,7 @@ public class DiskThresholdDeciderUnitTests extends ESTestCase { } public void testCanRemainUsesLeastAvailableSpace() { - NodeSettingsService nss = new NodeSettingsService(Settings.EMPTY); + ClusterSettings nss = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); ClusterInfoService cis = EmptyClusterInfoService.INSTANCE; DiskThresholdDecider decider = new DiskThresholdDecider(Settings.EMPTY, nss, cis, null); ImmutableOpenMap.Builder shardRoutingMap = ImmutableOpenMap.builder(); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java index 940634a4657..be64aafc61e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java +++ 
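The second recurring change starts here: NodeSettingsService gives way to ClusterSettings, and instead of constructing a DiskThresholdDecider.ApplySettings callback and invoking onRefreshSettings, the test simply pushes the new values with nss.applySettings(newSettings). A rough sketch of what such a registry does; the two method names are modeled on what the diff shows, while the internals are invented for illustration:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Sketch of a ClusterSettings-style registry: components subscribe once,
// and applySettings(...) pushes changed values to the subscribers.
final class SettingsRegistry {
    private final Map<String, Consumer<String>> updaters = new HashMap<>();

    // analogous in spirit to ClusterSettings#addSettingsUpdateConsumer
    void addUpdateConsumer(String key, Consumer<String> onChange) {
        updaters.put(key, onChange);
    }

    // analogous in spirit to ClusterSettings#applySettings(Settings)
    void applySettings(Map<String, String> newSettings) {
        for (Map.Entry<String, Consumer<String>> e : updaters.entrySet()) {
            if (newSettings.containsKey(e.getKey())) {
                e.getValue().accept(newSettings.get(e.getKey()));
            }
        }
    }
}

class DeciderWiring {
    public static void main(String[] args) {
        SettingsRegistry registry = new SettingsRegistry();
        String[] highWatermark = {"90%"}; // stand-in for a decider's mutable field
        registry.addUpdateConsumer("cluster.routing.allocation.disk.watermark.high",
                v -> highWatermark[0] = v);

        Map<String, String> update = new HashMap<>();
        update.put("cluster.routing.allocation.disk.watermark.high", "70%");
        registry.applySettings(update);       // the decider sees the new threshold
        System.out.println(highWatermark[0]); // prints: 70%
    }
}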
b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDeciderIT.java @@ -37,7 +37,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { public void testEnableRebalance() throws InterruptedException { final String firstNode = internalCluster().startNode(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); // we test with 2 shards since otherwise it's pretty fragile if there are differences in the number of shards such that // all shards are relocated to the second node, which is not what we want here. It's solely a test for the settings to take effect final int numShards = 2; @@ -64,7 +64,7 @@ public class EnableAllocationDeciderIT extends ESIntegTestCase { assertThat("index: [test] expected to be rebalanced on both nodes", test.size(), equalTo(2)); // flip the cluster-wide setting such that we can also balance for index test_1; eventually we should have one shard of each index on each node - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? EnableAllocationDecider.Rebalance.PRIMARIES : EnableAllocationDecider.Rebalance.ALL)).get(); logger.info("--> balance index [test_1]"); client().admin().cluster().prepareReroute().get(); ensureGreen("test_1"); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java index 0049a120777..1bdc39036a3 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -32,10 +31,10 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Rebalance; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESAllocationTestCase; import java.util.EnumSet; @@ -44,8 +43,8 @@ import
java.util.List; import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING; import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.hamcrest.Matchers.equalTo; @@ -58,7 +57,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableNone() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE.name()) .build()); logger.info("Building initial routing table"); @@ -86,7 +85,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testClusterEnableOnlyPrimaries() { AllocationService strategy = createAllocationService(settingsBuilder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.PRIMARIES.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.PRIMARIES.name()) .build()); logger.info("Building initial routing table"); @@ -159,11 +158,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { final boolean useClusterSetting = randomBoolean(); final Rebalance allowedOnes = RandomPicks.randomFrom(getRandom(), EnumSet.of(Rebalance.PRIMARIES, Rebalance.REPLICAS, Rebalance.ALL)); Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? 
Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -213,7 +212,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, allowedOnes) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), allowedOnes) .build())).build(); } else { prevState = clusterState; @@ -224,7 +223,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { .build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 6 shards to be started 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(6)); @@ -261,11 +260,11 @@ public class EnableAllocationTests extends ESAllocationTestCase { public void testEnableClusterBalanceNoReplicas() { final boolean useClusterSetting = randomBoolean(); Settings build = settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings - .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE, 3) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), useClusterSetting ? Rebalance.NONE: RandomPicks.randomFrom(getRandom(), Rebalance.values())) // index settings override cluster settings + .put(ConcurrentRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING.getKey(), 3) .build(); - NodeSettingsService nodeSettingsService = new NodeSettingsService(build); - AllocationService strategy = createAllocationService(build, nodeSettingsService, getRandom()); + ClusterSettings clusterSettings = new ClusterSettings(build, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + AllocationService strategy = createAllocationService(build, clusterSettings, getRandom()); Settings indexSettings = useClusterSetting ? Settings.EMPTY : settingsBuilder().put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, Rebalance.NONE).build(); logger.info("Building initial routing table"); @@ -307,7 +306,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { if (useClusterSetting) { prevState = clusterState; clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).transientSettings(settingsBuilder() - .put(CLUSTER_ROUTING_REBALANCE_ENABLE, randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL) + .put(CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), randomBoolean() ? Rebalance.PRIMARIES : Rebalance.ALL) .build())).build(); } else { prevState = clusterState; @@ -315,7 +314,7 @@ public class EnableAllocationTests extends ESAllocationTestCase { clusterState = ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).removeAllIndices() .put(IndexMetaData.builder(meta).settings(settingsBuilder().put(meta.getSettings()).put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, randomBoolean() ? 
Rebalance.PRIMARIES : Rebalance.ALL).build()))).build(); } - nodeSettingsService.clusterChanged(new ClusterChangedEvent("foo", clusterState, prevState)); + clusterSettings.applySettings(clusterState.metaData().settings()); routingTable = strategy.reroute(clusterState, "reroute").routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); assertThat("expected 4 primaries to be started and 2 to relocate useClusterSettings: " + useClusterSetting, clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(4)); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 126799f5937..a17017f6303 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.InternalClusterInfoService; import org.elasticsearch.cluster.MockInternalClusterInfoService; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -46,22 +47,12 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) public class MockDiskUsagesIT extends ESIntegTestCase { - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - // Update more frequently - .put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL, "1s") - .build(); - } - @Override protected Collection> nodePlugins() { // Use the mock internal cluster info service, which has fake-able disk usages return pluginList(MockInternalClusterInfoService.TestPlugin.class); } - //@TestLogging("org.elasticsearch.cluster:TRACE,org.elasticsearch.cluster.routing.allocation.decider:TRACE") public void testRerouteOccursOnDiskPassingHighWatermark() throws Exception { List nodes = internalCluster().startNodesAsync(3).get(); @@ -77,15 +68,16 @@ public class MockDiskUsagesIT extends ESIntegTestCase { // Start with all nodes at 50% usage final MockInternalClusterInfoService cis = (MockInternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName()); + cis.setUpdateFrequency(TimeValue.timeValueMillis(200)); + cis.onMaster(); cis.setN1Usage(nodes.get(0), new DiskUsage(nodes.get(0), "n1", "/dev/null", 100, 50)); cis.setN2Usage(nodes.get(1), new DiskUsage(nodes.get(1), "n2", "/dev/null", 100, 50)); cis.setN3Usage(nodes.get(2), new DiskUsage(nodes.get(2), "n3", "/dev/null", 100, 50)); client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, randomFrom("20b", "80%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, randomFrom("10b", "90%")) - .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL, "1ms")).get(); - + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), randomFrom("20b", "80%")) + .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), randomFrom("10b", "90%")) + 
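Both EnableAllocationTests branches above hinge on the repeated comment "index settings override cluster settings": when an index-level value is present it wins, otherwise the cluster-wide (possibly transient) value applies, otherwise the default. A small resolver sketch of that precedence, using hypothetical keys:

import java.util.HashMap;
import java.util.Map;

// Precedence sketch: index-level setting > cluster-level setting > default.
class SettingPrecedence {
    static String resolve(String key, Map<String, String> indexSettings,
                          Map<String, String> clusterSettings, String defaultValue) {
        if (indexSettings.containsKey(key)) {
            return indexSettings.get(key);   // index settings override cluster settings
        }
        if (clusterSettings.containsKey(key)) {
            return clusterSettings.get(key); // e.g. a transient cluster-wide update
        }
        return defaultValue;
    }

    public static void main(String[] args) {
        Map<String, String> index = new HashMap<>();
        Map<String, String> cluster = new HashMap<>();
        cluster.put("index.routing.rebalance.enable", "none");
        System.out.println(resolve("index.routing.rebalance.enable", index, cluster, "all")); // none
        index.put("index.routing.rebalance.enable", "primaries");
        System.out.println(resolve("index.routing.rebalance.enable", index, cluster, "all")); // primaries
    }
}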
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING.getKey(), "1ms")).get(); // Create an index with 10 shards so we can check allocation for it prepareCreate("test").setSettings(settingsBuilder() .put("number_of_shards", 10) diff --git a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 65d5b0b9fcd..fb8a8e28b33 100644 --- a/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/core/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.settings; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.ClusterName; @@ -32,8 +33,8 @@ import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.index.store.IndexStoreConfig; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.hamcrest.Matchers; +import static org.elasticsearch.common.inject.matcher.Matchers.not; import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -48,22 +49,142 @@ public class ClusterSettingsIT extends ESIntegTestCase { public void testClusterNonExistingSettingsUpdate() { String key1 = "no_idea_what_you_are_talking_about"; int value1 = 10; + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(key1, value1).build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "transient setting [no_idea_what_you_are_talking_about], not dynamically updateable"); + } + } + + public void testDeleteIsAppliedFirst() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertEquals(discoverySettings.getPublishTimeout(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY)); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); + + ClusterUpdateSettingsResponse response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false) + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "1s"); + assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)); + assertFalse(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
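testClusterNonExistingSettingsUpdate above captures the headline behavioral change of this PR: an update containing an unknown or non-dynamic key is now rejected with an IllegalArgumentException instead of being silently dropped (the old assertion merely checked for an empty transient-settings map). A sketch of that validate-before-apply step; the registry class is invented for illustration, but the error message is the one asserted in the test:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Sketch of validate-before-apply: one unknown key fails the whole update.
class StrictSettingsValidator {
    private final Set<String> knownDynamicKeys;

    StrictSettingsValidator(Set<String> knownDynamicKeys) {
        this.knownDynamicKeys = knownDynamicKeys;
    }

    void validate(Map<String, String> update) {
        for (String key : update.keySet()) {
            if (!knownDynamicKeys.contains(key)) {
                // mirrors the message asserted in testClusterNonExistingSettingsUpdate
                throw new IllegalArgumentException(
                        "transient setting [" + key + "], not dynamically updateable");
            }
        }
    }

    public static void main(String[] args) {
        StrictSettingsValidator v = new StrictSettingsValidator(
                new HashSet<>(Arrays.asList("discovery.zen.publish_timeout")));
        Map<String, String> bogus = new HashMap<>();
        bogus.put("no_idea_what_you_are_talking_about", "10");
        try {
            v.validate(bogus);
        } catch (IllegalArgumentException ex) {
            System.out.println(ex.getMessage()); // the rejection the test now expects
        }
    }
}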
"discovery.zen.*" : "*")).put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "2s")) + .get(); + assertEquals(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), "2s"); + assertNull(response.getTransientSettings().getAsBoolean(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), null)); + } + + public void testResetClusterSetting() { + DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); + + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); ClusterUpdateSettingsResponse response = client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(key1, value1).build()) + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())) + .get(); + + assertAcked(response); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull((randomBoolean() ? 
"discovery.zen.*" : "*"))) + .get(); + + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + // now persistent + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) + .get(); + + assertAcked(response); + assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull((DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()))) + .get(); + + assertAcked(response); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); + + + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder() + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build()) + .get(); + + assertAcked(response); + assertThat(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); + assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); + assertFalse(discoverySettings.getPublishDiff()); + response = client().admin().cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull((randomBoolean() ? 
"discovery.zen.*" : "*"))) + .get(); + + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey())); + assertNull(response.getPersistentSettings().getAsMap().get(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey())); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); + assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY))); } public void testClusterSettingsUpdateResponse() { - String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC; + String key1 = IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(); int value1 = 10; - String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; - boolean value2 = false; + String key2 = EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(); + String value2 = EnableAllocationDecider.Allocation.NONE.name(); Settings transientSettings1 = Settings.builder().put(key1, value1, ByteSizeUnit.BYTES).build(); Settings persistentSettings1 = Settings.builder().put(key2, value2).build(); @@ -114,43 +235,59 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertThat(response3.getPersistentSettings().get(key2), notNullValue()); } + public void testCanUpdateTracerSettings() { + ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", + "internal:gateway/local*")) + .get(); + assertArrayEquals(clusterUpdateSettingsResponse.getTransientSettings().getAsArray("transport.tracer.include"), new String[] {"internal:index/shard/recovery/*", + "internal:gateway/local*"}); + } + public void testUpdateDiscoveryPublishTimeout() { DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class); - assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.DEFAULT_PUBLISH_TIMEOUT)); + assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY))); ClusterUpdateSettingsResponse response = client().admin().cluster() .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "1s").build()) + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s").build()) .get(); assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT), equalTo("1s")); + assertThat(response.getTransientSettings().getAsMap().get(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey()), equalTo("1s")); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "whatever").build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "whatever").build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Failed to parse setting [discovery.zen.commit_timeout] with value [whatever] as a time value: unit is missing or unrecognized"); + } - assertAcked(response); - 
assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); - response = client().admin().cluster() - .prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, -1).build()) - .get(); + try { + client().admin().cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), -1).build()) + .get(); + fail("bogus value"); + } catch (IllegalArgumentException ex) { + assertEquals(ex.getMessage(), "Failed to parse value [-1] for setting [discovery.zen.publish_timeout] must be >= 0s"); + } - assertAcked(response); - assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); } public void testClusterUpdateSettingsWithBlocks() { String key1 = "cluster.routing.allocation.enable"; - Settings transientSettings = Settings.builder().put(key1, false).build(); + Settings transientSettings = Settings.builder().put(key1, EnableAllocationDecider.Allocation.NONE.name()).build(); String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; Settings persistentSettings = Settings.builder().put(key2, "5").build(); @@ -165,7 +302,7 @@ public class ClusterSettingsIT extends ESIntegTestCase { assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK); // But it's possible to update the settings to update the "cluster.blocks.read_only" setting - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build(); + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), false).build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); } finally { diff --git a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index c5a695d16e5..6094d49234c 100644 --- a/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -225,7 +225,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testAttributePreferenceRouting() { AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone") .build()); @@ -280,7 +280,7 @@ public class RoutingIteratorTests extends ESAllocationTestCase { public void testNodeSelectorRouting(){ AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .build()); MetaData metaData = MetaData.builder() diff --git a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java index fa4ce357a52..bb9d23db1cb 
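The updated testUpdateDiscoveryPublishTimeout shows the same strictness applied to values: "whatever" (no unit) and -1 (negative) now fail the update with a parse error rather than being acknowledged and ignored. A toy parser enforcing those two checks; the real parsing lives in TimeValue and the Setting definitions, and treating bare numbers as milliseconds (so the -1 case reaches the range check) is an assumption for this sketch:

// Toy time-value check: require a recognized unit and a non-negative amount.
class TimeValueCheck {
    static long parseMillis(String key, String value) {
        long millis;
        try {
            // assumption: bare numbers are interpreted as milliseconds
            millis = Long.parseLong(value);
        } catch (NumberFormatException notBareNumber) {
            if (value.endsWith("ms")) {
                millis = Long.parseLong(value.substring(0, value.length() - 2));
            } else if (value.endsWith("s")) {
                millis = 1000 * Long.parseLong(value.substring(0, value.length() - 1));
            } else {
                throw new IllegalArgumentException("Failed to parse setting [" + key
                        + "] with value [" + value + "] as a time value: unit is missing or unrecognized");
            }
        }
        if (millis < 0) {
            throw new IllegalArgumentException("Failed to parse value [" + value
                    + "] for setting [" + key + "] must be >= 0s");
        }
        return millis;
    }

    public static void main(String[] args) {
        System.out.println(parseMillis("discovery.zen.publish_timeout", "1s")); // 1000
        try {
            parseMillis("discovery.zen.publish_timeout", "whatever"); // rejected, as in the test
        } catch (IllegalArgumentException ex) {
            System.out.println(ex.getMessage());
        }
    }
}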
100644 --- a/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java +++ b/core/src/test/java/org/elasticsearch/common/breaker/MemoryCircuitBreakerTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.common.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import java.util.concurrent.atomic.AtomicBoolean; @@ -87,7 +87,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicReference lastException = new AtomicReference<>(null); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker getBreaker(String name) { @@ -147,7 +147,7 @@ public class MemoryCircuitBreakerTests extends ESTestCase { final AtomicInteger parentTripped = new AtomicInteger(0); final AtomicReference breakerRef = new AtomicReference<>(null); - final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)) { + final CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) { @Override public CircuitBreaker getBreaker(String name) { diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java index f15a731e86e..279e31aadd4 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/AbstractShapeBuilderTestCase.java @@ -23,14 +23,20 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; public abstract class AbstractShapeBuilderTestCase extends ESTestCase { @@ -47,6 +53,12 @@ public abstract class AbstractShapeBuilderTestCase exte namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PointBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, 
CircleBuilder.PROTOTYPE); namedWriteableRegistry.registerPrototype(ShapeBuilder.class, EnvelopeBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPointBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, LineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiLineStringBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, PolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, MultiPolygonBuilder.PROTOTYPE); + namedWriteableRegistry.registerPrototype(ShapeBuilder.class, GeometryCollectionBuilder.PROTOTYPE); } } @@ -63,7 +75,7 @@ public abstract class AbstractShapeBuilderTestCase exte /** * mutate the given shape so the returned shape is different */ - protected abstract SB mutate(SB original) throws IOException; + protected abstract SB createMutation(SB original) throws IOException; /** * Test that creates new shape from a random test shape and checks both for equality @@ -89,19 +101,21 @@ public abstract class AbstractShapeBuilderTestCase exte /** * Test serialization and deserialization of the test shape. */ + @SuppressWarnings("unchecked") public void testSerialization() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB testShape = createTestShapeBuilder(); - SB deserializedShape = copyShape(testShape); - assertEquals(deserializedShape, testShape); - assertEquals(deserializedShape.hashCode(), testShape.hashCode()); - assertNotSame(deserializedShape, testShape); + SB deserializedShape = (SB) copyShape(testShape); + assertEquals(testShape, deserializedShape); + assertEquals(testShape.hashCode(), deserializedShape.hashCode()); + assertNotSame(testShape, deserializedShape); } } /** * Test equality and hashCode properties */ + @SuppressWarnings("unchecked") public void testEqualsAndHashcode() throws IOException { for (int runs = 0; runs < NUMBER_OF_TESTBUILDERS; runs++) { SB firstShape = createTestShapeBuilder(); @@ -110,15 +124,15 @@ public abstract class AbstractShapeBuilderTestCase exte assertTrue("shape is not equal to self", firstShape.equals(firstShape)); assertThat("same shape's hashcode returns different values if called multiple times", firstShape.hashCode(), equalTo(firstShape.hashCode())); - assertThat("different shapes should not be equal", mutate(firstShape), not(equalTo(firstShape))); + assertThat("different shapes should not be equal", createMutation(firstShape), not(equalTo(firstShape))); - SB secondShape = copyShape(firstShape); + SB secondShape = (SB) copyShape(firstShape); assertTrue("shape is not equal to self", secondShape.equals(secondShape)); assertTrue("shape is not equal to its copy", firstShape.equals(secondShape)); assertTrue("equals is not symmetric", secondShape.equals(firstShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(firstShape.hashCode())); - SB thirdShape = copyShape(secondShape); + SB thirdShape = (SB) copyShape(secondShape); assertTrue("shape is not equal to self", thirdShape.equals(thirdShape)); assertTrue("shape is not equal to its copy", secondShape.equals(thirdShape)); assertThat("shape copy's hashcode is different from original hashcode", secondShape.hashCode(), equalTo(thirdShape.hashCode())); @@ -129,14 +143,12 @@ public abstract class AbstractShapeBuilderTestCase exte } } - protected SB copyShape(SB original) throws IOException { + static ShapeBuilder copyShape(ShapeBuilder 
original) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(output.bytes()), namedWriteableRegistry)) { ShapeBuilder prototype = (ShapeBuilder) namedWriteableRegistry.getPrototype(ShapeBuilder.class, original.getWriteableName()); - @SuppressWarnings("unchecked") - SB copy = (SB) prototype.readFrom(in); - return copy; + return prototype.readFrom(in); } } } diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java similarity index 81% rename from core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java rename to core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java index 6b102b87b2c..1db9da428ad 100644 --- a/core/src/test/java/org/elasticsearch/common/geo/builders/CirlceBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/CircleBuilderTests.java @@ -25,20 +25,20 @@ import org.elasticsearch.common.unit.DistanceUnit; import java.io.IOException; -public class CirlceBuilderTests extends AbstractShapeBuilderTestCase { +public class CircleBuilderTests extends AbstractShapeBuilderTestCase { @Override protected CircleBuilder createTestShapeBuilder() { - double centerX = randomDoubleBetween(-180, 180, false); - double centerY = randomDoubleBetween(-90, 90, false); - return new CircleBuilder() - .center(new Coordinate(centerX, centerY)) - .radius(randomDoubleBetween(0.1, 10.0, false), randomFrom(DistanceUnit.values())); + return createRandomShape(); } @Override - protected CircleBuilder mutate(CircleBuilder original) throws IOException { - CircleBuilder mutation = copyShape(original); + protected CircleBuilder createMutation(CircleBuilder original) throws IOException { + return mutate(original); + } + + static CircleBuilder mutate(CircleBuilder original) throws IOException { + CircleBuilder mutation = (CircleBuilder) copyShape(original); double radius = original.radius(); DistanceUnit unit = original.unit(); @@ -55,4 +55,12 @@ public class CirlceBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected GeometryCollectionBuilder createTestShapeBuilder() { + GeometryCollectionBuilder geometryCollection = new GeometryCollectionBuilder(); + int shapes = randomIntBetween(0, 8); + for (int i = 0; i < shapes; i++) { + switch (randomIntBetween(0, 7)) { + case 0: + geometryCollection.shape(PointBuilderTests.createRandomShape()); + break; + case 1: + geometryCollection.shape(CircleBuilderTests.createRandomShape()); + break; + case 2: + geometryCollection.shape(EnvelopeBuilderTests.createRandomShape()); + break; + case 3: + geometryCollection.shape(LineStringBuilderTests.createRandomShape()); + break; + case 4: + geometryCollection.shape(MultiLineStringBuilderTests.createRandomShape()); + break; + case 5: + geometryCollection.shape(MultiPolygonBuilderTests.createRandomShape()); + break; + case 6: + geometryCollection.shape(MultiPointBuilderTests.createRandomShape()); + break; + case 7: + geometryCollection.shape(PolygonBuilderTests.createRandomShape()); + break; + } + } + return geometryCollection; + } + + @Override + protected GeometryCollectionBuilder createMutation(GeometryCollectionBuilder original) throws IOException { + return mutate(original); + } + + static GeometryCollectionBuilder mutate(GeometryCollectionBuilder original) throws IOException { + 
GeometryCollectionBuilder mutation = (GeometryCollectionBuilder) copyShape(original); + if (mutation.shapes.size() > 0) { + int shapePosition = randomIntBetween(0, mutation.shapes.size() - 1); + ShapeBuilder shapeToChange = mutation.shapes.get(shapePosition); + switch (shapeToChange.type()) { + case POINT: + shapeToChange = PointBuilderTests.mutate((PointBuilder) shapeToChange); + break; + case CIRCLE: + shapeToChange = CircleBuilderTests.mutate((CircleBuilder) shapeToChange); + break; + case ENVELOPE: + shapeToChange = EnvelopeBuilderTests.mutate((EnvelopeBuilder) shapeToChange); + break; + case LINESTRING: + shapeToChange = LineStringBuilderTests.mutate((LineStringBuilder) shapeToChange); + break; + case MULTILINESTRING: + shapeToChange = MultiLineStringBuilderTests.mutate((MultiLineStringBuilder) shapeToChange); + break; + case MULTIPOLYGON: + shapeToChange = MultiPolygonBuilderTests.mutate((MultiPolygonBuilder) shapeToChange); + break; + case MULTIPOINT: + shapeToChange = MultiPointBuilderTests.mutate((MultiPointBuilder) shapeToChange); + break; + case POLYGON: + shapeToChange = PolygonBuilderTests.mutate((PolygonBuilder) shapeToChange); + break; + case GEOMETRYCOLLECTION: + throw new UnsupportedOperationException("GeometryCollection should not be nested inside each other"); + } + mutation.shapes.set(shapePosition, shapeToChange); + } else { + mutation.shape(RandomShapeGenerator.createShape(getRandom())); + } + return mutation; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java new file mode 100644 index 00000000000..53e30cc5a80 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/LineStringBuilderTests.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
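The testSerialization/copyShape changes above keep the core round-trip idiom intact: write the shape to a stream, read it back through the prototype registered for its writeable name, and require an equal but distinct object. The same idiom is shown below stripped of the Elasticsearch stream and NamedWriteableRegistry machinery, with plain java.io serialization swapped in purely for illustration:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.Objects;

// Round-trip test idiom: restored = deserialize(serialize(original)),
// then require equals(), matching hashCode(), and distinct identity.
class RoundTripDemo {
    static class Circle implements Serializable {
        final double x, y, radius;
        Circle(double x, double y, double radius) { this.x = x; this.y = y; this.radius = radius; }
        @Override public boolean equals(Object o) {
            if (!(o instanceof Circle)) return false;
            Circle c = (Circle) o;
            return x == c.x && y == c.y && radius == c.radius;
        }
        @Override public int hashCode() { return Objects.hash(x, y, radius); }
    }

    static Object roundTrip(Serializable original) throws IOException, ClassNotFoundException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
            out.writeObject(original);
        }
        try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
            return in.readObject();
        }
    }

    public static void main(String[] args) throws Exception {
        Circle original = new Circle(10, 20, 1.5);
        Object restored = roundTrip(original);
        if (!original.equals(restored) || original.hashCode() != restored.hashCode() || original == restored) {
            throw new AssertionError("round trip must yield an equal, distinct copy");
        }
        System.out.println("round trip ok");
    }
}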
+ */ + +package org.elasticsearch.common.geo.builders; + +import com.vividsolutions.jts.geom.Coordinate; + +import org.elasticsearch.test.geo.RandomShapeGenerator; +import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType; + +import java.io.IOException; + +public class LineStringBuilderTests extends AbstractShapeBuilderTestCase { + + @Override + protected LineStringBuilder createTestShapeBuilder() { + return createRandomShape(); + } + + @Override + protected LineStringBuilder createMutation(LineStringBuilder original) throws IOException { + return mutate(original); + } + + static LineStringBuilder mutate(LineStringBuilder original) throws IOException { + LineStringBuilder mutation = (LineStringBuilder) copyShape(original); + Coordinate[] coordinates = original.coordinates(false); + Coordinate coordinate = randomFrom(coordinates); + if (randomBoolean()) { + if (coordinate.x != 0.0) { + coordinate.x = coordinate.x / 2; + } else { + coordinate.x = randomDoubleBetween(-180.0, 180.0, true); + } + } else { + if (coordinate.y != 0.0) { + coordinate.y = coordinate.y / 2; + } else { + coordinate.y = randomDoubleBetween(-90.0, 90.0, true); + } + } + return mutation.points(coordinates); + } + + static LineStringBuilder createRandomShape() { + LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING); + if (randomBoolean()) { + lsb.close(); + } + return lsb; + } +} diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java new file mode 100644 index 00000000000..5d0ad7ed130 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
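The rename from mutate to createMutation makes the test contract explicit: each concrete builder test must supply a copy (equal, with the same hashCode, but a different instance) and a mutation (never equal). That contract generalizes into a reusable checker along these lines, offered here as a sketch rather than an existing utility in this codebase:

import java.util.function.UnaryOperator;

// Generic equals/hashCode contract check driven by a copy and a mutation.
class EqualsContract {
    static <T> void check(T original, UnaryOperator<T> copyOf, UnaryOperator<T> mutationOf) {
        if (!original.equals(original)) {
            throw new AssertionError("equals must be reflexive");
        }
        T copy = copyOf.apply(original);
        if (!original.equals(copy) || original.hashCode() != copy.hashCode()) {
            throw new AssertionError("copy must be equal with an identical hashCode");
        }
        if (copy == original) {
            throw new AssertionError("copy must not be the same instance");
        }
        T mutation = mutationOf.apply(original);
        if (original.equals(mutation)) {
            throw new AssertionError("mutation must not be equal to the original");
        }
    }

    public static void main(String[] args) {
        // usage example with Strings standing in for shape builders
        check("abc", s -> new String(s), s -> s + "!");
        System.out.println("contract holds");
    }
}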
+        if (randomBoolean()) {
+            if (coordinate.x != 0.0) {
+                coordinate.x = coordinate.x / 2;
+            } else {
+                coordinate.x = randomDoubleBetween(-180.0, 180.0, true);
+            }
+        } else {
+            if (coordinate.y != 0.0) {
+                coordinate.y = coordinate.y / 2;
+            } else {
+                coordinate.y = randomDoubleBetween(-90.0, 90.0, true);
+            }
+        }
+        return mutation.points(coordinates);
+    }
+
+    static LineStringBuilder createRandomShape() {
+        LineStringBuilder lsb = (LineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.LINESTRING);
+        if (randomBoolean()) {
+            lsb.close();
+        }
+        return lsb;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java
new file mode 100644
index 00000000000..5d0ad7ed130
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiLineStringBuilderTests.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import com.vividsolutions.jts.geom.Coordinate;
+
+import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
+
+import java.io.IOException;
+
+public class MultiLineStringBuilderTests extends AbstractShapeBuilderTestCase<MultiLineStringBuilder> {
+
+    @Override
+    protected MultiLineStringBuilder createTestShapeBuilder() {
+        return createRandomShape();
+    }
+
+    @Override
+    protected MultiLineStringBuilder createMutation(MultiLineStringBuilder original) throws IOException {
+        return mutate(original);
+    }
+
+    static MultiLineStringBuilder mutate(MultiLineStringBuilder original) throws IOException {
+        MultiLineStringBuilder mutation = (MultiLineStringBuilder) copyShape(original);
+        Coordinate[][] coordinates = mutation.coordinates();
+        int lineToChange = randomInt(coordinates.length - 1);
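+        // mutate one coordinate of exactly one randomly chosen line; all other lines stay untouched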
+        for (int i = 0; i < coordinates.length; i++) {
+            Coordinate[] line = coordinates[i];
+            if (i == lineToChange) {
+                Coordinate coordinate = randomFrom(line);
+                if (randomBoolean()) {
+                    if (coordinate.x != 0.0) {
+                        coordinate.x = coordinate.x / 2;
+                    } else {
+                        coordinate.x = randomDoubleBetween(-180.0, 180.0, true);
+                    }
+                } else {
+                    if (coordinate.y != 0.0) {
+                        coordinate.y = coordinate.y / 2;
+                    } else {
+                        coordinate.y = randomDoubleBetween(-90.0, 90.0, true);
+                    }
+                }
+            }
+        }
+        return mutation;
+    }
+
+    static MultiLineStringBuilder createRandomShape() {
+        return (MultiLineStringBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTILINESTRING);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java
new file mode 100644
index 00000000000..fca76e2e973
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPointBuilderTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import com.vividsolutions.jts.geom.Coordinate;
+
+import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
+
+import java.io.IOException;
+
+public class MultiPointBuilderTests extends AbstractShapeBuilderTestCase<MultiPointBuilder> {
+
+    @Override
+    protected MultiPointBuilder createTestShapeBuilder() {
+        return createRandomShape();
+    }
+
+    @Override
+    protected MultiPointBuilder createMutation(MultiPointBuilder original) throws IOException {
+        return mutate(original);
+    }
+
+    static MultiPointBuilder mutate(MultiPointBuilder original) throws IOException {
+        MultiPointBuilder mutation = (MultiPointBuilder) copyShape(original);
+        Coordinate[] coordinates = original.coordinates(false);
+        Coordinate coordinate = randomFrom(coordinates);
+        if (randomBoolean()) {
+            if (coordinate.x != 0.0) {
+                coordinate.x = coordinate.x / 2;
+            } else {
+                coordinate.x = randomDoubleBetween(-180.0, 180.0, true);
+            }
+        } else {
+            if (coordinate.y != 0.0) {
+                coordinate.y = coordinate.y / 2;
+            } else {
+                coordinate.y = randomDoubleBetween(-90.0, 90.0, true);
+            }
+        }
+        return mutation.points(coordinates);
+    }
+
+    static MultiPointBuilder createRandomShape() {
+        return (MultiPointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.MULTIPOINT);
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java
new file mode 100644
index 00000000000..702114a2cb8
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/MultiPolygonBuilderTests.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation;
+import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
+
+import java.io.IOException;
+
+public class MultiPolygonBuilderTests extends AbstractShapeBuilderTestCase<MultiPolygonBuilder> {
+
+    @Override
+    protected MultiPolygonBuilder createTestShapeBuilder() {
+        return createRandomShape();
+    }
+
+    @Override
+    protected MultiPolygonBuilder createMutation(MultiPolygonBuilder original) throws IOException {
+        return mutate(original);
+    }
+
+    static MultiPolygonBuilder mutate(MultiPolygonBuilder original) throws IOException {
+        MultiPolygonBuilder mutation;
+        if (randomBoolean()) {
+            mutation = new MultiPolygonBuilder(original.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT);
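+            // flip the orientation, then copy the polygons over unchanged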
+            for (PolygonBuilder pb : original.polygons()) {
+                mutation.polygon((PolygonBuilder) copyShape(pb));
+            }
+        } else {
+            mutation = (MultiPolygonBuilder) copyShape(original);
+            if (mutation.polygons().size() > 0) {
+                int polyToChange = randomInt(mutation.polygons().size() - 1);
+                mutation.polygons().set(polyToChange, PolygonBuilderTests.mutatePolygonBuilder(mutation.polygons().get(polyToChange)));
+            } else {
+                mutation.polygon((PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON));
+            }
+        }
+        return mutation;
+    }
+
+    static MultiPolygonBuilder createRandomShape() {
+        MultiPolygonBuilder mpb = new MultiPolygonBuilder(randomFrom(Orientation.values()));
+        int polys = randomIntBetween(0, 10);
+        for (int i = 0; i < polys; i++) {
+            PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON);
+            mpb.polygon(pgb);
+        }
+        return mpb;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java
index 1e94a1bab3a..1946d24581b 100644
--- a/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PointBuilderTests.java
@@ -24,15 +24,27 @@ import com.vividsolutions.jts.geom.Coordinate;
 import org.elasticsearch.test.geo.RandomShapeGenerator;
 import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
 
+import java.io.IOException;
+
 public class PointBuilderTests extends AbstractShapeBuilderTestCase<PointBuilder> {
 
     @Override
     protected PointBuilder createTestShapeBuilder() {
-        return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT);
+        return createRandomShape();
     }
 
     @Override
-    protected PointBuilder mutate(PointBuilder original) {
-        return new PointBuilder().coordinate(new Coordinate(original.longitude()/2, original.latitude()/2));
+    protected PointBuilder createMutation(PointBuilder original) throws IOException {
+        return mutate(original);
     }
+
+    static PointBuilder mutate(PointBuilder original) {
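+        // halving longitude and latitude yields a different point for any input except (0, 0), which random generation practically never produces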
+        return new PointBuilder().coordinate(new Coordinate(original.longitude() / 2, original.latitude() / 2));
+    }
+
+    static PointBuilder createRandomShape() {
+        return (PointBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POINT);
+    }
+
+
 }
diff --git a/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java
new file mode 100644
index 00000000000..ad8b3b817fe
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/geo/builders/PolygonBuilderTests.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.geo.builders;
+
+import com.vividsolutions.jts.geom.Coordinate;
+
+import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation;
+import org.elasticsearch.test.geo.RandomShapeGenerator;
+import org.elasticsearch.test.geo.RandomShapeGenerator.ShapeType;
+
+import java.io.IOException;
+
+public class PolygonBuilderTests extends AbstractShapeBuilderTestCase<PolygonBuilder> {
+
+    @Override
+    protected PolygonBuilder createTestShapeBuilder() {
+        return createRandomShape();
+    }
+
+    @Override
+    protected PolygonBuilder createMutation(PolygonBuilder original) throws IOException {
+        return mutate(original);
+    }
+
+    static PolygonBuilder mutate(PolygonBuilder original) throws IOException {
+        PolygonBuilder mutation = (PolygonBuilder) copyShape(original);
+        return mutatePolygonBuilder(mutation);
+    }
+
+    static PolygonBuilder mutatePolygonBuilder(PolygonBuilder pb) {
+        if (randomBoolean()) {
+            pb = polyWithOpposingOrientation(pb);
+        } else {
+            // change either point in shell or in random hole
+            LineStringBuilder lineToChange;
+            if (randomBoolean() || pb.holes().size() == 0) {
+                lineToChange = pb.shell();
+            } else {
+                lineToChange = randomFrom(pb.holes());
+            }
+            Coordinate coordinate = randomFrom(lineToChange.coordinates(false));
+            if (randomBoolean()) {
+                if (coordinate.x != 0.0) {
+                    coordinate.x = coordinate.x / 2;
+                } else {
+                    coordinate.x = randomDoubleBetween(-180.0, 180.0, true);
+                }
+            } else {
+                if (coordinate.y != 0.0) {
+                    coordinate.y = coordinate.y / 2;
+                } else {
+                    coordinate.y = randomDoubleBetween(-90.0, 90.0, true);
+                }
+            }
+        }
+        return pb;
+    }
+
+    /**
+     * Takes an input polygon and returns an identical one, only with opposing orientation setting.
+     * This is done so we don't have to expose a setter for orientation in the actual class
+     */
+    private static PolygonBuilder polyWithOpposingOrientation(PolygonBuilder pb) {
+        PolygonBuilder mutation = new PolygonBuilder(pb.orientation() == Orientation.LEFT ? Orientation.RIGHT : Orientation.LEFT);
+        mutation.points(pb.shell().coordinates(false));
+        for (LineStringBuilder hole : pb.holes()) {
+            mutation.hole(hole);
+        }
+        return mutation;
+    }
+
+    static PolygonBuilder createRandomShape() {
+        PolygonBuilder pgb = (PolygonBuilder) RandomShapeGenerator.createShape(getRandom(), ShapeType.POLYGON);
+        if (randomBoolean()) {
+            pgb = polyWithOpposingOrientation(pgb);
+        }
+        return pgb;
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java
index 7901694bd4b..7d696b0cd81 100644
--- a/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java
+++ b/core/src/test/java/org/elasticsearch/common/inject/ModuleTestCase.java
@@ -60,6 +60,24 @@ public abstract class ModuleTestCase extends ESTestCase {
         fail("Did not find any binding to " + to.getName() + ". Found these bindings:\n" + s);
     }
 
+    /** Configures the module and asserts "clazz" is not bound to anything. */
+    public void assertNotBound(Module module, Class<?> clazz) {
+        List<Element> elements = Elements.getElements(module);
+        for (Element element : elements) {
+            if (element instanceof LinkedKeyBinding) {
+                LinkedKeyBinding binding = (LinkedKeyBinding) element;
+                if (clazz.equals(binding.getKey().getTypeLiteral().getType())) {
+                    fail("Found binding for " + clazz.getName() + " to " + binding.getKey().getTypeLiteral().getType().getTypeName());
+                }
+            } else if (element instanceof UntargettedBinding) {
+                UntargettedBinding binding = (UntargettedBinding) element;
+                if (clazz.equals(binding.getKey().getTypeLiteral().getType())) {
+                    fail("Found binding for " + clazz.getName());
+                }
+            }
+        }
+    }
+
     /**
      * Attempts to configure the module, and asserts an {@link IllegalArgumentException} is
      * caught, containing the given messages
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java
index fcd2f7d8767..17345fd714f 100644
--- a/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java
+++ b/core/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java
@@ -17,20 +17,14 @@
  * under the License.
  */
 package org.elasticsearch.common.lucene;
+
 import org.apache.lucene.analysis.MockAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.Field.Store;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.DirectoryReader;
-import org.apache.lucene.index.IndexWriter;
-import org.apache.lucene.index.IndexWriterConfig;
-import org.apache.lucene.index.NoDeletionPolicy;
-import org.apache.lucene.index.NoMergePolicy;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.SegmentInfos;
-import org.apache.lucene.index.Term;
+import org.apache.lucene.index.*;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
@@ -41,11 +35,7 @@ import org.apache.lucene.util.Version;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
+import java.util.*;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
diff --git a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
index f4f3034528f..7ee238ae7f2 100644
--- a/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
+++ b/core/src/test/java/org/elasticsearch/common/lucene/all/SimpleAllTests.java
@@ -220,6 +220,41 @@ public class SimpleAllTests extends ESTestCase {
         indexWriter.close();
     }
 
+    public void testTermMissingFromOneSegment() throws Exception {
+        Directory dir = new RAMDirectory();
+        IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
+
+        Document doc = new Document();
+        doc.add(new Field("_id", "1", StoredField.TYPE));
+        AllEntries allEntries = new AllEntries();
+        allEntries.addText("field", "something", 2.0f);
+        allEntries.reset();
+        doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER)));
+
+        indexWriter.addDocument(doc);
+        indexWriter.commit();
+
+        doc = new Document();
+        doc.add(new Field("_id", "2", StoredField.TYPE));
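+        // the second document goes into its own segment and does not contain the term "something"
+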
allEntries = new AllEntries(); + allEntries.addText("field", "else", 1.0f); + allEntries.reset(); + doc.add(new TextField("_all", AllTokenStream.allTokenStream("_all", allEntries, Lucene.STANDARD_ANALYZER))); + + indexWriter.addDocument(doc); + + IndexReader reader = DirectoryReader.open(indexWriter, true); + assertEquals(2, reader.leaves().size()); + IndexSearcher searcher = new IndexSearcher(reader); + + // "something" only appears in the first segment: + Query query = new AllTermQuery(new Term("_all", "something")); + TopDocs docs = searcher.search(query, 10); + assertEquals(1, docs.totalHits); + + indexWriter.close(); + } + public void testMultipleTokensAllNoBoost() throws Exception { Directory dir = new RAMDirectory(); IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); diff --git a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 96715a05b3c..ad811a38aed 100644 --- a/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/core/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -24,13 +24,7 @@ import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.StringField; import org.apache.lucene.document.TextField; -import org.apache.lucene.index.DirectoryReader; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.index.IndexWriterConfig; -import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.index.Term; +import org.apache.lucene.index.*; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -42,14 +36,7 @@ import org.elasticsearch.test.ESTestCase; import org.junit.After; import org.junit.Before; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -201,7 +188,7 @@ public class FreqTermsEnumTests extends ESTestCase { for (int i = 0; i < cycles; i++) { List terms = new ArrayList<>(Arrays.asList(this.terms)); - Collections.shuffle(terms, getRandom()); + Collections.shuffle(terms, random()); for (String term : terms) { if (!termsEnum.seekExact(new BytesRef(term))) { assertThat("term : " + term, reference.get(term).docFreq, is(0)); diff --git a/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java new file mode 100644 index 00000000000..798e82a979e --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -0,0 +1,176 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.network; + +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Table; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.http.HttpServerAdapter; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpStats; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.cat.AbstractCatAction; +import org.elasticsearch.rest.action.cat.RestNodesAction; +import org.elasticsearch.rest.action.main.RestMainAction; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportService; + +public class NetworkModuleTests extends ModuleTestCase { + + static class FakeTransportService extends TransportService { + public FakeTransportService() { + super(null, null); + } + } + + static class FakeTransport extends AssertingLocalTransport { + public FakeTransport() { + super(null, null, null, null); + } + } + + static class FakeHttpTransport extends AbstractLifecycleComponent implements HttpServerTransport { + public FakeHttpTransport() { + super(null); + } + @Override + protected void doStart() {} + @Override + protected void doStop() {} + @Override + protected void doClose() {} + @Override + public BoundTransportAddress boundAddress() { + return null; + } + @Override + public HttpInfo info() { + return null; + } + @Override + public HttpStats stats() { + return null; + } + @Override + public void httpServerAdapter(HttpServerAdapter httpServerAdapter) {} + } + + static class FakeRestHandler extends BaseRestHandler { + public FakeRestHandler() { + super(null, null, null); + } + @Override + protected void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {} + } + + static class FakeCatRestHandler extends AbstractCatAction { + public FakeCatRestHandler() { + super(null, null, null); + } + @Override + protected void doRequest(RestRequest request, RestChannel channel, Client client) {} + @Override + protected void documentation(StringBuilder sb) {} + @Override + protected Table getTableWithHeader(RestRequest request) { + return null; + } + } + + public void testRegisterTransportService() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + 
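// the module built for the transport client (third constructor argument set to true) must accept the same registration
+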
module.registerTransportService("custom", FakeTransportService.class); + assertBinding(module, TransportService.class, FakeTransportService.class); + } + + public void testRegisterTransport() { + Settings settings = Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerTransport("custom", FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + + // check it works with transport only as well + module = new NetworkModule(new NetworkService(settings), settings, true); + module.registerTransport("custom", FakeTransport.class); + assertBinding(module, Transport.class, FakeTransport.class); + } + + public void testRegisterHttpTransport() { + Settings settings = Settings.builder().put(NetworkModule.HTTP_TYPE_KEY, "custom").build(); + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerHttpTransport("custom", FakeHttpTransport.class); + assertBinding(module, HttpServerTransport.class, FakeHttpTransport.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerHttpTransport("custom", FakeHttpTransport.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Cannot register http transport")); + assertTrue(e.getMessage().contains("for transport client")); + } + + // not added if http is disabled + settings = Settings.builder().put(NetworkModule.HTTP_ENABLED, false).build(); + module = new NetworkModule(new NetworkService(settings), settings, false); + assertNotBound(module, HttpServerTransport.class); + } + + public void testRegisterRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, RestHandler.class, FakeRestHandler.class, RestMainAction.class); + + // check registration not allowed for transport only + module = new NetworkModule(new NetworkService(settings), settings, true); + try { + module.registerRestHandler(FakeRestHandler.class); + fail(); + } catch (IllegalArgumentException e) { + assertTrue(e.getMessage().contains("Cannot register rest handler")); + assertTrue(e.getMessage().contains("for transport client")); + } + } + + public void testRegisterCatRestHandler() { + Settings settings = Settings.EMPTY; + NetworkModule module = new NetworkModule(new NetworkService(settings), settings, false); + module.registerRestHandler(FakeCatRestHandler.class); + // also check a builtin is bound + assertSetMultiBinding(module, AbstractCatAction.class, FakeCatRestHandler.class, RestNodesAction.class); + } +} diff --git a/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java new file mode 100644 index 00000000000..97393c51b8d --- /dev/null +++ b/core/src/test/java/org/elasticsearch/common/settings/ScopedSettingsTests.java @@ -0,0 +1,168 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider;
+import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class ScopedSettingsTests extends ESTestCase {
+
+    public void testAddConsumer() {
+        Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER);
+        Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER);
+        AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, Collections.singleton(testSetting));
+
+        AtomicInteger consumer = new AtomicInteger();
+        service.addSettingsUpdateConsumer(testSetting, consumer::set);
+        AtomicInteger consumer2 = new AtomicInteger();
+        try {
+            service.addSettingsUpdateConsumer(testSetting2, consumer2::set);
+            fail("setting not registered");
+        } catch (IllegalArgumentException ex) {
+            assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage());
+        }
+
+        try {
+            service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {consumer.set(a); consumer2.set(b);});
+            fail("setting not registered");
+        } catch (IllegalArgumentException ex) {
+            assertEquals("Setting is not registered for key [foo.bar.baz]", ex.getMessage());
+        }
+        assertEquals(0, consumer.get());
+        assertEquals(0, consumer2.get());
+        service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build());
+        assertEquals(2, consumer.get());
+        assertEquals(0, consumer2.get());
+    }
+
+    public void testApply() {
+        Setting<Integer> testSetting = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER);
+        Setting<Integer> testSetting2 = Setting.intSetting("foo.bar.baz", 1, true, Setting.Scope.CLUSTER);
+        AbstractScopedSettings service = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(testSetting, testSetting2)));
+
+        AtomicInteger consumer = new AtomicInteger();
+        service.addSettingsUpdateConsumer(testSetting, consumer::set);
+        AtomicInteger consumer2 = new AtomicInteger();
+        service.addSettingsUpdateConsumer(testSetting2, consumer2::set, (s) -> assertTrue(s > 0));
+
+        AtomicInteger aC = new AtomicInteger();
+        AtomicInteger bC = new AtomicInteger();
+        service.addSettingsUpdateConsumer(testSetting, testSetting2, (a, b) -> {aC.set(a); bC.set(b);});
+
+        assertEquals(0, consumer.get());
+        assertEquals(0, consumer2.get());
+        assertEquals(0, aC.get());
+        assertEquals(0, bC.get());
+        try {
+            service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build());
+            fail("invalid value");
+        } catch (IllegalArgumentException ex) {
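+            // a batch that fails validation is rejected as a whole; even the valid foo.bar update must not reach its consumer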
+ assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", ex.getMessage()); + } + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + try { + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", -15).build()); + fail("invalid value"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [foo.bar.baz] from [1] to [-15]", ex.getMessage()); + } + + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + service.dryRun(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(0, consumer.get()); + assertEquals(0, consumer2.get()); + assertEquals(0, aC.get()); + assertEquals(0, bC.get()); + + service.applySettings(Settings.builder().put("foo.bar", 2).put("foo.bar.baz", 15).build()); + assertEquals(2, consumer.get()); + assertEquals(15, consumer2.get()); + assertEquals(2, aC.get()); + assertEquals(15, bC.get()); + } + + public void testGet() { + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + + // group setting - complex matcher + Setting setting = settings.get("cluster.routing.allocation.require.value"); + assertEquals(setting, FilterAllocationDecider.CLUSTER_ROUTING_REQUIRE_GROUP_SETTING); + + setting = settings.get("cluster.routing.allocation.total_shards_per_node"); + assertEquals(setting, ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING); + + // array settings - complex matcher + assertNotNull(settings.get("transport.tracer.include." + randomIntBetween(1, 100))); + assertSame(TransportService.TRACE_LOG_INCLUDE_SETTING, settings.get("transport.tracer.include." + randomIntBetween(1, 100))); + + // array settings - complex matcher - only accepts numbers + assertNull(settings.get("transport.tracer.include.FOO")); + } + + public void testIsDynamic(){ + ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER), Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER)))); + assertFalse(settings.hasDynamicSetting("foo.bar.baz")); + assertTrue(settings.hasDynamicSetting("foo.bar")); + assertNotNull(settings.get("foo.bar.baz")); + settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + assertTrue(settings.hasDynamicSetting("transport.tracer.include." 
+    public void testDiff() throws IOException {
+        Setting<Integer> foobarbaz = Setting.intSetting("foo.bar.baz", 1, false, Setting.Scope.CLUSTER);
+        Setting<Integer> foobar = Setting.intSetting("foo.bar", 1, true, Setting.Scope.CLUSTER);
+        ClusterSettings settings = new ClusterSettings(Settings.EMPTY, new HashSet<>(Arrays.asList(foobar, foobarbaz)));
+        Settings diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.EMPTY);
+        assertEquals(diff.getAsMap().size(), 1);
+        assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(1));
+
+        diff = settings.diff(Settings.builder().put("foo.bar", 5).build(), Settings.builder().put("foo.bar.baz", 17).build());
+        assertEquals(diff.getAsMap().size(), 1);
+        assertEquals(diff.getAsInt("foo.bar.baz", null), Integer.valueOf(17));
+    }
+
+    public void testUpdateTracer() {
+        ClusterSettings settings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        AtomicReference<List<String>> ref = new AtomicReference<>();
+        settings.addSettingsUpdateConsumer(TransportService.TRACE_LOG_INCLUDE_SETTING, ref::set);
+        settings.applySettings(Settings.builder().putArray("transport.tracer.include", "internal:index/shard/recovery/*", "internal:gateway/local*").build());
+        assertNotNull(ref.get());
+        assertEquals(ref.get().size(), 2);
+        assertTrue(ref.get().contains("internal:index/shard/recovery/*"));
+        assertTrue(ref.get().contains("internal:gateway/local*"));
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
new file mode 100644
index 00000000000..069418a7e1d
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/common/settings/SettingTests.java
@@ -0,0 +1,323 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.settings;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicReference;
+
+public class SettingTests extends ESTestCase {
+
+    public void testGet() {
+        Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER);
+        assertFalse(booleanSetting.get(Settings.EMPTY));
+        assertFalse(booleanSetting.get(Settings.builder().put("foo.bar", false).build()));
+        assertTrue(booleanSetting.get(Settings.builder().put("foo.bar", true).build()));
+    }
+
+    public void testByteSize() {
+        Setting<ByteSizeValue> byteSizeValueSetting = Setting.byteSizeSetting("a.byte.size", new ByteSizeValue(1024), true, Setting.Scope.CLUSTER);
+        assertFalse(byteSizeValueSetting.isGroupSetting());
+        ByteSizeValue byteSizeValue = byteSizeValueSetting.get(Settings.EMPTY);
+        assertEquals(byteSizeValue.bytes(), 1024);
+        AtomicReference<ByteSizeValue> value = new AtomicReference<>(null);
+        ClusterSettings.SettingUpdater<ByteSizeValue> settingUpdater = byteSizeValueSetting.newUpdater(value::set, logger);
+        try {
+            settingUpdater.apply(Settings.builder().put("a.byte.size", 12).build(), Settings.EMPTY);
+            fail("no unit");
+        } catch (IllegalArgumentException ex) {
+            assertEquals("failed to parse setting [a.byte.size] with value [12] as a size in bytes: unit is missing or unrecognized", ex.getMessage());
+        }
+
+        assertTrue(settingUpdater.apply(Settings.builder().put("a.byte.size", "12b").build(), Settings.EMPTY));
+        assertEquals(new ByteSizeValue(12), value.get());
+    }
+
+    public void testSimpleUpdate() {
+        Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER);
+        AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null);
+        ClusterSettings.SettingUpdater<Boolean> settingUpdater = booleanSetting.newUpdater(atomicBoolean::set, logger);
+        Settings build = Settings.builder().put("foo.bar", false).build();
+        settingUpdater.apply(build, Settings.EMPTY);
+        assertNull(atomicBoolean.get());
+        build = Settings.builder().put("foo.bar", true).build();
+        settingUpdater.apply(build, Settings.EMPTY);
+        assertTrue(atomicBoolean.get());
+
+        // try updating with a bogus value
+        build = Settings.builder().put("foo.bar", "I am not a boolean").build();
+        try {
+            settingUpdater.apply(build, Settings.EMPTY);
+            fail("not a boolean");
+        } catch (IllegalArgumentException ex) {
+            assertEquals("Failed to parse value [I am not a boolean] cannot be parsed to boolean [ true/1/on/yes OR false/0/off/no ]", ex.getMessage());
+        }
+    }
+
+    public void testUpdateNotDynamic() {
+        Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, false, Setting.Scope.CLUSTER);
+        assertFalse(booleanSetting.isGroupSetting());
+        AtomicReference<Boolean> atomicBoolean = new AtomicReference<>(null);
+        try {
+            booleanSetting.newUpdater(atomicBoolean::set, logger);
+            fail("not dynamic");
+        } catch (IllegalStateException ex) {
+            assertEquals("setting [foo.bar] is not dynamic", ex.getMessage());
+        }
+    }
+
+    public void testUpdaterIsIsolated() {
+        Setting<Boolean> booleanSetting = Setting.boolSetting("foo.bar", false, true, Setting.Scope.CLUSTER);
+        AtomicReference<Boolean> ab1 = new AtomicReference<>(null);
+        AtomicReference<Boolean> ab2 = new AtomicReference<>(null);
+        ClusterSettings.SettingUpdater<Boolean> settingUpdater = booleanSetting.newUpdater(ab1::set, logger);
+        ClusterSettings.SettingUpdater<Boolean> settingUpdater2 = booleanSetting.newUpdater(ab2::set, logger);
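+        // two independent updaters over the same setting: applying one must not affect the other's consumer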
+        settingUpdater.apply(Settings.builder().put("foo.bar", true).build(), Settings.EMPTY);
+        assertTrue(ab1.get());
+        assertNull(ab2.get());
+    }
+
+    public void testDefault() {
+        TimeValue defaultValue = TimeValue.timeValueMillis(randomIntBetween(0, 1000000));
+        Setting<TimeValue> setting = Setting.positiveTimeSetting("my.time.value", defaultValue, randomBoolean(), Setting.Scope.CLUSTER);
+        assertFalse(setting.isGroupSetting());
+        String aDefault = setting.getDefault(Settings.EMPTY);
+        assertEquals(defaultValue.millis() + "ms", aDefault);
+        assertEquals(defaultValue.millis(), setting.get(Settings.EMPTY).millis());
+
+        Setting<String> secondaryDefault = new Setting<>("foo.bar", (s) -> s.get("old.foo.bar", "some_default"), (s) -> s, randomBoolean(), Setting.Scope.CLUSTER);
+        assertEquals("some_default", secondaryDefault.get(Settings.EMPTY));
+        assertEquals("42", secondaryDefault.get(Settings.builder().put("old.foo.bar", 42).build()));
+    }
+
+    public void testComplexType() {
+        AtomicReference<ComplexType> ref = new AtomicReference<>(null);
+        Setting<ComplexType> setting = new Setting<>("foo.bar", (s) -> "", (s) -> new ComplexType(s), true, Setting.Scope.CLUSTER);
+        assertFalse(setting.isGroupSetting());
+        ref.set(setting.get(Settings.EMPTY));
+        ComplexType type = ref.get();
+        ClusterSettings.SettingUpdater<ComplexType> settingUpdater = setting.newUpdater(ref::set, logger);
+        assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY));
+        assertSame("no update - type has not changed", type, ref.get());
+
+        // change from default
+        assertTrue(settingUpdater.apply(Settings.builder().put("foo.bar", "2").build(), Settings.EMPTY));
+        assertNotSame("update - type has changed", type, ref.get());
+        assertEquals("2", ref.get().foo);
+
+        // change back to default...
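+        // removing the key falls back to the default factory, so foo resets to the empty string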
+        assertTrue(settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "2").build()));
+        assertNotSame("update - type has changed", type, ref.get());
+        assertEquals("", ref.get().foo);
+    }
+
+    public void testType() {
+        Setting<Integer> integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.CLUSTER);
+        assertEquals(integerSetting.getScope(), Setting.Scope.CLUSTER);
+        integerSetting = Setting.intSetting("foo.int.bar", 1, true, Setting.Scope.INDEX);
+        assertEquals(integerSetting.getScope(), Setting.Scope.INDEX);
+    }
+
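+    // a group setting collects every key under its prefix into one nested Settings object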
+    public void testGroups() {
+        AtomicReference<Settings> ref = new AtomicReference<>(null);
+        Setting<Settings> setting = Setting.groupSetting("foo.bar.", true, Setting.Scope.CLUSTER);
+        assertTrue(setting.isGroupSetting());
+        ClusterSettings.SettingUpdater<Settings> settingUpdater = setting.newUpdater(ref::set, logger);
+
+        Settings currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build();
+        Settings previousInput = Settings.EMPTY;
+        assertTrue(settingUpdater.apply(currentInput, previousInput));
+        assertNotNull(ref.get());
+        Settings settings = ref.get();
+        Map<String, Settings> asMap = settings.getAsGroups();
+        assertEquals(3, asMap.size());
+        assertEquals(asMap.get("1").get("value"), "1");
+        assertEquals(asMap.get("2").get("value"), "2");
+        assertEquals(asMap.get("3").get("value"), "3");
+
+        previousInput = currentInput;
+        currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").put("foo.bar.3.value", "3").build();
+        Settings current = ref.get();
+        assertFalse(settingUpdater.apply(currentInput, previousInput));
+        assertSame(current, ref.get());
+
+        previousInput = currentInput;
+        currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build();
+        // now update and check that we got it
+        assertTrue(settingUpdater.apply(currentInput, previousInput));
+        assertNotSame(current, ref.get());
+
+        asMap = ref.get().getAsGroups();
+        assertEquals(2, asMap.size());
+        assertEquals(asMap.get("1").get("value"), "1");
+        assertEquals(asMap.get("2").get("value"), "2");
+
+        previousInput = currentInput;
+        currentInput = Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "4").build();
+        // now update and check that we got it
+        assertTrue(settingUpdater.apply(currentInput, previousInput));
+        assertNotSame(current, ref.get());
+
+        asMap = ref.get().getAsGroups();
+        assertEquals(2, asMap.size());
+        assertEquals(asMap.get("1").get("value"), "1");
+        assertEquals(asMap.get("2").get("value"), "4");
+
+        assertTrue(setting.match("foo.bar.baz"));
+        assertFalse(setting.match("foo.baz.bar"));
+
+        ClusterSettings.SettingUpdater<Settings> predicateSettingUpdater = setting.newUpdater(ref::set, logger, (s) -> assertFalse(true));
+        try {
+            predicateSettingUpdater.apply(Settings.builder().put("foo.bar.1.value", "1").put("foo.bar.2.value", "2").build(), Settings.EMPTY);
+            fail("not accepted");
+        } catch (IllegalArgumentException ex) {
+            assertEquals(ex.getMessage(), "illegal value can't update [foo.bar.] from [{}] to [{1.value=1, 2.value=2}]");
+        }
+    }
+
+    public static class ComplexType {
+
+        final String foo;
+
+        public ComplexType(String foo) {
+            this.foo = foo;
+        }
+    }
+
+    public static class Composite {
+
+        private Integer b;
+        private Integer a;
+
+        public void set(Integer a, Integer b) {
+            this.a = a;
+            this.b = b;
+        }
+    }
+
+    public void testComposite() {
+        Composite c = new Composite();
+        Setting<Integer> a = Setting.intSetting("foo.int.bar.a", 1, true, Setting.Scope.CLUSTER);
+        Setting<Integer> b = Setting.intSetting("foo.int.bar.b", 1, true, Setting.Scope.CLUSTER);
+        ClusterSettings.SettingUpdater<Tuple<Integer, Integer>> settingUpdater = Setting.compoundUpdater(c::set, a, b, logger);
+        assertFalse(settingUpdater.apply(Settings.EMPTY, Settings.EMPTY));
+        assertNull(c.a);
+        assertNull(c.b);
+
+        Settings build = Settings.builder().put("foo.int.bar.a", 2).build();
+        assertTrue(settingUpdater.apply(build, Settings.EMPTY));
+        assertEquals(2, c.a.intValue());
+        assertEquals(1, c.b.intValue());
+
+        Integer aValue = c.a;
+        assertFalse(settingUpdater.apply(build, build));
+        assertSame(aValue, c.a);
+        Settings previous = build;
+        build = Settings.builder().put("foo.int.bar.a", 2).put("foo.int.bar.b", 5).build();
+        assertTrue(settingUpdater.apply(build, previous));
+        assertEquals(2, c.a.intValue());
+        assertEquals(5, c.b.intValue());
+
+        // reset to default
+        assertTrue(settingUpdater.apply(Settings.EMPTY, build));
+        assertEquals(1, c.a.intValue());
+        assertEquals(1, c.b.intValue());
+    }
+
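+    // list settings accept both the indexed form (foo.bar.0, foo.bar.1, ...) and a plain comma-separated value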
+    public void testListSettings() {
+        Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER);
+        List<String> value = listSetting.get(Settings.EMPTY);
+        assertEquals(1, value.size());
+        assertEquals("foo,bar", value.get(0));
+
+        List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,");
+        Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0]));
+        value = listSetting.get(builder.build());
+        assertEquals(input.size(), value.size());
+        assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0]));
+
+        // try to parse this really annoying format
+        builder = Settings.builder();
+        for (int i = 0; i < input.size(); i++) {
+            builder.put("foo.bar." + i, input.get(i));
+        }
+        value = listSetting.get(builder.build());
+        assertEquals(input.size(), value.size());
+        assertArrayEquals(value.toArray(new String[0]), input.toArray(new String[0]));
+
+        AtomicReference<List<String>> ref = new AtomicReference<>();
+        AbstractScopedSettings.SettingUpdater<List<String>> settingUpdater = listSetting.newUpdater(ref::set, logger);
+        assertTrue(settingUpdater.hasChanged(builder.build(), Settings.EMPTY));
+        settingUpdater.apply(builder.build(), Settings.EMPTY);
+        assertEquals(input.size(), ref.get().size());
+        assertArrayEquals(ref.get().toArray(new String[0]), input.toArray(new String[0]));
+
+        settingUpdater.apply(Settings.builder().putArray("foo.bar", "123").build(), builder.build());
+        assertEquals(1, ref.get().size());
+        assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"123"});
+
+        settingUpdater.apply(Settings.builder().put("foo.bar", "1,2,3").build(), Settings.builder().putArray("foo.bar", "123").build());
+        assertEquals(3, ref.get().size());
+        assertArrayEquals(ref.get().toArray(new String[0]), new String[] {"1", "2", "3"});
+
+        settingUpdater.apply(Settings.EMPTY, Settings.builder().put("foo.bar", "1,2,3").build());
+        assertEquals(1, ref.get().size());
+        assertEquals("foo,bar", ref.get().get(0));
+
+        Setting<List<Integer>> otherSettings = Setting.listSetting("foo.bar", Collections.emptyList(), Integer::parseInt, true, Setting.Scope.CLUSTER);
+        List<Integer> defaultValue = otherSettings.get(Settings.EMPTY);
+        assertEquals(0, defaultValue.size());
+        List<Integer> intValues = otherSettings.get(Settings.builder().put("foo.bar", "0,1,2,3").build());
+        assertEquals(4, intValues.size());
+        for (int i = 0; i < intValues.size(); i++) {
+            assertEquals(i, intValues.get(i).intValue());
+        }
+    }
+
+    public void testListSettingAcceptsNumberSyntax() {
+        Setting<List<String>> listSetting = Setting.listSetting("foo.bar", Arrays.asList("foo,bar"), (s) -> s.toString(), true, Setting.Scope.CLUSTER);
+        List<String> input = Arrays.asList("test", "test1, test2", "test", ",,,,");
+        Settings.Builder builder = Settings.builder().putArray("foo.bar", input.toArray(new String[0]));
+        // try to parse this really annoying format
+        for (String key : builder.internalMap().keySet()) {
+            assertTrue("key: " + key + " doesn't match", listSetting.match(key));
+        }
+        builder = Settings.builder().put("foo.bar", "1,2,3");
+        for (String key : builder.internalMap().keySet()) {
+            assertTrue("key: " + key + " doesn't match", listSetting.match(key));
+        }
+        assertFalse(listSetting.match("foo_bar"));
+        assertFalse(listSetting.match("foo_bar.1"));
+        assertTrue(listSetting.match("foo.bar"));
+        assertTrue(listSetting.match("foo.bar."
+ randomIntBetween(0,10000))); + + } +} diff --git a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java index ec0e26091df..2945d86fe59 100644 --- a/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java +++ b/core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java @@ -162,4 +162,14 @@ public class TimeValueTests extends ESTestCase { assertThat(e.getMessage(), containsString("Failed to parse")); } } + + public void testToStringRep() { + assertEquals("-1", new TimeValue(-1).getStringRep()); + assertEquals("10ms", new TimeValue(10, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("1533ms", new TimeValue(1533, TimeUnit.MILLISECONDS).getStringRep()); + assertEquals("90s", new TimeValue(90, TimeUnit.SECONDS).getStringRep()); + assertEquals("90m", new TimeValue(90, TimeUnit.MINUTES).getStringRep()); + assertEquals("36h", new TimeValue(36, TimeUnit.HOURS).getStringRep()); + assertEquals("1000d", new TimeValue(1000, TimeUnit.DAYS).getStringRep()); + } } diff --git a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 7d36c09ee19..184de7f385e 100644 --- a/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -21,13 +21,13 @@ package org.elasticsearch.common.util; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cache.recycler.PageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.Before; @@ -336,9 +336,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { for (String type : Arrays.asList("Byte", "Int", "Long", "Float", "Double", "Object")) { HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), size - 1, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { @@ -356,9 +356,9 @@ public class BigArraysTests extends ESSingleNodeTestCase { final long maxSize = randomIntBetween(1 << 10, 1 << 22); HierarchyCircuitBreakerService hcbs = new HierarchyCircuitBreakerService( Settings.builder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize, ByteSizeUnit.BYTES) + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), maxSize, ByteSizeUnit.BYTES) .build(), - new NodeSettingsService(Settings.EMPTY)); + new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); BigArrays bigArrays = new BigArrays(null, 
hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); diff --git a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java index fe9ba6b1fcf..4c3612da8e0 100644 --- a/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/CollectionUtilsTests.java @@ -25,14 +25,7 @@ import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.Counter; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.SortedSet; -import java.util.TreeSet; +import java.util.*; import static org.elasticsearch.common.util.CollectionUtils.eagerPartition; import static org.hamcrest.Matchers.equalTo; @@ -80,7 +73,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(tmpList, getRandom()); + Collections.shuffle(tmpList, random()); for (BytesRef ref : tmpList) { array.append(ref); } @@ -111,7 +104,7 @@ public class CollectionUtilsTests extends ESTestCase { array.append(new BytesRef(s)); } if (randomBoolean()) { - Collections.shuffle(values, getRandom()); + Collections.shuffle(values, random()); } int[] indices = new int[array.size()]; for (int i = 0; i < indices.length; i++) { diff --git a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java index 1d2d2141166..deac15b50d3 100644 --- a/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java +++ b/core/src/test/java/org/elasticsearch/common/util/concurrent/PrioritizedExecutorsTests.java @@ -27,13 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.PriorityBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; +import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.equalTo; @@ -46,7 +40,7 @@ public class PrioritizedExecutorsTests extends ESTestCase { public void testPriorityQueue() throws Exception { PriorityBlockingQueue queue = new PriorityBlockingQueue<>(); List priorities = Arrays.asList(Priority.values()); - Collections.shuffle(priorities); + Collections.shuffle(priorities, random()); for (Priority priority : priorities) { queue.add(priority); diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java index 7ffafc004ab..9129e3c05b3 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/builder/XContentBuilderTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.common.xcontent.builder; import org.apache.lucene.util.BytesRef; import 
org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -39,6 +38,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; +import java.util.Collections; import java.util.Date; import java.util.GregorianCalendar; import java.util.HashMap; @@ -51,9 +51,6 @@ import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConvers import static org.elasticsearch.common.xcontent.XContentBuilder.FieldCaseConversion.UNDERSCORE; import static org.hamcrest.Matchers.equalTo; -/** - * - */ public class XContentBuilderTests extends ESTestCase { public void testPrettyWithLfAtEnd() throws Exception { ByteArrayOutputStream os = new ByteArrayOutputStream(); @@ -350,4 +347,33 @@ public class XContentBuilderTests extends ESTestCase { "}", string.trim()); } + public void testWriteMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.map(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } + + public void testWriteMapValueWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.value(Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } + + public void testWriteFieldMapWithNullKeys() throws IOException { + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + try { + builder.field("map", Collections.singletonMap(null, "test")); + fail("write map should have failed"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), equalTo("field name cannot be null")); + } + } } diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c33..b9d4fdec913 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -132,7 +132,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but to long so test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("http.enabled", false) // just to make test quicker .put("gateway.local.list_timeout", "10s") // still long to induce failures but to long so test won't time out .build(); @@ -150,7 +150,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // TODO: Rarely use default 
diff --git a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java index b14792a2c33..b9d4fdec913 100644 --- a/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/DiscoveryWithServiceDisruptionsIT.java @@ -132,7 +132,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // for hitting simulated network failures quickly .put("discovery.zen.join_timeout", "10s") // still long to induce failures but not too long so the test won't time out - .put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("http.enabled", false) // just to make test quicker .put("gateway.local.list_timeout", "10s") // still long to induce failures but not too long so the test won't time out .build(); @@ -150,7 +150,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // TODO: Rarely use default settings for some of these Settings nodeSettings = Settings.builder() .put(DEFAULT_SETTINGS) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, minimumMasterNode) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode) .build(); if (discoveryConfig == null) { @@ -217,7 +217,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { logger.info("--> reducing min master nodes to 2"); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2)).get()); + .setTransientSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2)).get()); String master = internalCluster().getMasterName(); String nonMaster = null; @@ -293,9 +293,9 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { // Wait until the master node sees all 3 nodes again. ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkPartition.expectedTimeToHeal().millis())); - logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK, "all"); + logger.info("Verify no master block with {} set to {}", DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all"); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK, "all")) + .setTransientSettings(Settings.builder().put(DiscoverySettings.NO_MASTER_BLOCK_SETTING.getKey(), "all")) .get(); networkPartition.startDisrupting(); @@ -473,7 +473,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { docsPerIndexer = 1 + randomInt(5); logger.info("indexing " + docsPerIndexer + " docs per indexer during partition"); countDownLatchRef.set(new CountDownLatch(docsPerIndexer * indexers.size())); - Collections.shuffle(semaphores); + Collections.shuffle(semaphores, random()); for (Semaphore semaphore : semaphores) { assertThat(semaphore.availablePermits(), equalTo(0)); semaphore.release(docsPerIndexer); @@ -683,7 +683,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { ensureGreen("test"); nodes = new ArrayList<>(nodes); - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); String isolatedNode = nodes.get(0); String notIsolatedNode = nodes.get(1); @@ -863,7 +863,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { internalCluster().startNodesAsync(3, Settings.builder() .put(DiscoveryService.SETTING_INITIAL_STATE_TIMEOUT, "1ms") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "3s") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s") .build()).get(); logger.info("applying disruption while cluster is forming ..."); @@ -1038,7 +1038,7 @@ public class DiscoveryWithServiceDisruptionsIT extends ESIntegTestCase { new NetworkDisconnectPartition(getRandom()), new SlowClusterStateProcessing(getRandom()) ); - Collections.shuffle(list); + Collections.shuffle(list, random()); setDisruptionScheme(list.get(0)); return list.get(0); } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java index e7bded344d9..c4955561905 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ElectMasterServiceTests.java @@ -26,11 +26,7 @@ import
org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESTestCase; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; public class ElectMasterServiceTests extends ESTestCase { @@ -53,7 +49,7 @@ public class ElectMasterServiceTests extends ESTestCase { nodes.add(node); } - Collections.shuffle(nodes, getRandom()); + Collections.shuffle(nodes, random()); return nodes; } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java index ea590756a8b..4a248784f91 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/NodeJoinControllerTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -38,7 +39,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.BaseFuture; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.membership.MembershipAction; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.cluster.TestClusterService; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -67,7 +67,7 @@ public class NodeJoinControllerTests extends ESTestCase { // make sure we have a master clusterService.setState(ClusterState.builder(clusterService.state()).nodes(DiscoveryNodes.builder(initialNodes).masterNodeId(localNode.id()))); nodeJoinController = new NodeJoinController(clusterService, new NoopRoutingService(Settings.EMPTY), - new DiscoverySettings(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)), Settings.EMPTY); + new DiscoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), Settings.EMPTY); } public void testSimpleJoinAccumulation() throws InterruptedException, ExecutionException { @@ -244,7 +244,7 @@ public class NodeJoinControllerTests extends ESTestCase { // add - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] unique master nodes. 
Total of [{}] join requests", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -269,7 +269,7 @@ public class NodeJoinControllerTests extends ESTestCase { } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", finalJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); @@ -316,7 +316,7 @@ public class NodeJoinControllerTests extends ESTestCase { nodesToJoin.add(node); } } - Collections.shuffle(nodesToJoin); + Collections.shuffle(nodesToJoin, random()); logger.debug("--> joining [{}] nodes, with repetition a total of [{}]", initialJoins, nodesToJoin.size()); for (DiscoveryNode node : nodesToJoin) { pendingJoins.add(joinNodeAsync(node)); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java index 0b5f9997dba..217e86526cc 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryIT.java @@ -24,10 +24,8 @@ import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; @@ -45,6 +43,7 @@ import org.elasticsearch.discovery.zen.fd.FaultDetection; import org.elasticsearch.discovery.zen.membership.MembershipAction; import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.BytesTransportRequest; @@ -57,9 +56,7 @@ import org.hamcrest.Matchers; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; +import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicReference; @@ -84,7 +81,7 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(zenDiscovery.isRejoinOnMasterGone(), is(true)); client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(ZenDiscovery.SETTING_REJOIN_ON_MASTER_GONE, false)) + .setTransientSettings(Settings.builder().put(ZenDiscovery.REJOIN_ON_MASTER_GONE_SETTING.getKey(), false)) .get(); assertThat(zenDiscovery.isRejoinOnMasterGone(), is(false)); @@ -228,16 +225,69 @@ public class ZenDiscoveryIT extends ESIntegTestCase { assertThat(ExceptionsHelper.detailedMessage(reference.get()), containsString("cluster state from a different master than the current one, rejecting")); } + public void 
testHandleNodeJoin_incompatibleClusterState() throws UnknownHostException { + Settings nodeSettings = Settings.settingsBuilder() + .put("discovery.type", "zen") // <-- To override the local setting if set externally + .build(); + String masterOnlyNode = internalCluster().startMasterOnlyNode(nodeSettings); + String node1 = internalCluster().startNode(nodeSettings); + ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, masterOnlyNode); + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, node1); + final ClusterState state = clusterService.state(); + MetaData.Builder mdBuilder = MetaData.builder(state.metaData()); + mdBuilder.putCustom(CustomMetaData.TYPE, new CustomMetaData("data")); + ClusterState stateWithCustomMetaData = ClusterState.builder(state).metaData(mdBuilder).build(); + + final AtomicReference<IllegalStateException> holder = new AtomicReference<>(); + DiscoveryNode node = state.nodes().localNode(); + zenDiscovery.handleJoinRequest(node, stateWithCustomMetaData, new MembershipAction.JoinCallback() { + @Override + public void onSuccess() { + } + + @Override + public void onFailure(Throwable t) { + holder.set((IllegalStateException) t); + } + }); + + assertThat(holder.get(), notNullValue()); + assertThat(holder.get().getMessage(), equalTo("failure when sending a validation request to node")); + } + + public static class CustomMetaData extends TestCustomMetaData { + public static final String TYPE = "custom_md"; + + CustomMetaData(String data) { + super(data); + } + + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new CustomMetaData(data); + } + + @Override + public String type() { + return TYPE; + } + + @Override + public EnumSet<MetaData.XContentContext> context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); + } + } + public void testHandleNodeJoin_incompatibleMinVersion() throws UnknownHostException { Settings nodeSettings = Settings.settingsBuilder() .put("discovery.type", "zen") // <-- To override the local setting if set externally .build(); String nodeName = internalCluster().startNode(nodeSettings, Version.V_2_0_0_beta1); ZenDiscovery zenDiscovery = (ZenDiscovery) internalCluster().getInstance(Discovery.class, nodeName); - + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); DiscoveryNode node = new DiscoveryNode("_node_id", new InetSocketTransportAddress(InetAddress.getByName("0.0.0.0"), 0), Version.V_1_6_0); final AtomicReference<IllegalStateException> holder = new AtomicReference<>(); - zenDiscovery.handleJoinRequest(node, new MembershipAction.JoinCallback() { + zenDiscovery.handleJoinRequest(node, clusterService.state(), new MembershipAction.JoinCallback() { @Override public void onSuccess() { } diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java index 82733d92206..c54489bceba 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/ZenPingTests.java @@ -62,7 +62,7 @@ public class ZenPingTests extends ESTestCase { } // shuffle - Collections.shuffle(pings); + Collections.shuffle(pings, random()); ZenPing.PingCollection collection = new ZenPing.PingCollection(); collection.addPings(pings.toArray(new ZenPing.PingResponse[pings.size()])); diff --git a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java
b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java index d33aadd84aa..0bac1bc4c2e 100644 --- a/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java +++ b/core/src/test/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateActionTests.java @@ -21,17 +21,15 @@ package org.elasticsearch.discovery.zen.publish; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -42,40 +40,24 @@ import org.elasticsearch.discovery.Discovery; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.node.service.NodeService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportConnectionListener; -import org.elasticsearch.transport.TransportResponse; -import org.elasticsearch.transport.TransportResponseOptions; -import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.*; import org.elasticsearch.transport.local.LocalTransport; import org.junit.After; import org.junit.Before; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.emptyIterable; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; @TestLogging("discovery.zen.publish:TRACE") public class PublishClusterStateActionTests extends ESTestCase { @@ -156,7 +138,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public MockNode createMockNode(String name, Settings settings, Version version, @Nullable ClusterStateListener listener) throws Exception { settings = Settings.builder() .put("name", name) - 
.put(TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING") + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING") .put(settings) .build(); @@ -237,7 +219,7 @@ public class PublishClusterStateActionTests extends ESTestCase { protected MockPublishAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, DiscoveryNodesProvider nodesProvider, PublishClusterStateAction.NewPendingClusterStateListener listener) { - DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); return new MockPublishAction(settings, transportService, nodesProvider, listener, discoverySettings, ClusterName.DEFAULT); } @@ -345,7 +327,7 @@ public class PublishClusterStateActionTests extends ESTestCase { } public void testDisablingDiffPublishing() throws Exception { - Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + Settings noDiffPublishingSettings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), false).build(); MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new ClusterStateListener() { @Override @@ -384,7 +366,7 @@ public class PublishClusterStateActionTests extends ESTestCase { public void testSimultaneousClusterStatePublishing() throws Exception { int numberOfNodes = randomIntBetween(2, 10); int numberOfIterations = scaledRandomIntBetween(5, 50); - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, randomBoolean()).build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.getKey(), randomBoolean()).build(); MockNode master = createMockNode("node0", settings, Version.CURRENT, new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { @@ -510,8 +492,8 @@ public class PublishClusterStateActionTests extends ESTestCase { final boolean expectingToCommit = randomBoolean(); Settings.Builder settings = Settings.builder(); // make sure we have a reasonable timeout if we expect to timeout, o.w. one that will make the test "hang" - settings.put(DiscoverySettings.COMMIT_TIMEOUT, expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h") - .put(DiscoverySettings.PUBLISH_TIMEOUT, "5ms"); // test is about committing + settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? 
"100ms" : "1h") + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing MockNode master = createMockNode("master", settings.build()); @@ -675,7 +657,7 @@ public class PublishClusterStateActionTests extends ESTestCase { logger.info("--> committing states"); - Collections.shuffle(states, random()); + Randomness.shuffle(states); for (ClusterState state : states) { node.action.handleCommitRequest(new PublishClusterStateAction.CommitClusterStateRequest(state.stateUUID()), channel); assertThat(channel.response.get(), equalTo((TransportResponse) TransportResponse.Empty.INSTANCE)); @@ -695,7 +677,7 @@ public class PublishClusterStateActionTests extends ESTestCase { */ public void testTimeoutOrCommit() throws Exception { Settings settings = Settings.builder() - .put(DiscoverySettings.COMMIT_TIMEOUT, "1ms").build(); // short but so we will sometime commit sometime timeout + .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout MockNode master = createMockNode("master", settings); MockNode node = createMockNode("node", settings); diff --git a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 15ddc9dd771..2c6a55da242 100644 --- a/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -57,7 +57,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); @@ -111,7 +111,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase { //ridiculous settings to make sure we don't run into uninitialized because fo default AllocationService strategy = createAllocationService(settingsBuilder() .put("cluster.routing.allocation.concurrent_recoveries", 100) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "always") + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.cluster_concurrent_rebalance", 100) .put("cluster.routing.allocation.node_initial_primaries_recoveries", 100) .build()); diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d7f4c9176d0..441314b1e35 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.MockDirectoryWrapper; -import 
diff --git a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index d7f4c9176d0..441314b1e35 100644 --- a/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -19,12 +19,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.ChecksumIndexInput; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.SimpleFSDirectory; +import org.apache.lucene.store.*; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -33,11 +28,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -50,20 +41,10 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardOpenOption; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Set; +import java.util.*; import java.util.stream.StreamSupport; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.startsWith; +import static org.hamcrest.Matchers.*; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to work with ExtrasFS public class MetaDataStateFormatTests extends ESTestCase { @@ -349,7 +330,7 @@ public class MetaDataStateFormatTests extends ESTestCase { } List<Path> dirList = Arrays.asList(dirs); - Collections.shuffle(dirList, getRandom()); + Collections.shuffle(dirList, random()); MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0])); MetaData latestMetaData = meta.get(numStates-1); assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_"))); diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 73cbb51faed..193985a1c68 100644 --- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; -import java.io.IOException; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -59,25 +59,29 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { this.testAllocator = new TestAllocator(); } - /** - * Verifies that the canProcess method of primary allocation behaves correctly - * and processes only the applicable shard.
- */ - public void testNoProcessReplica() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); - } - - public void testNoProcessPrimayNotAllcoatedBefore() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, true, ShardRoutingState.UNASSIGNED, 0, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); - assertThat(testAllocator.needToFindPrimaryCopy(shard), equalTo(false)); + public void testNoProcessPrimaryNotAllocatedBefore() { + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomBoolean(), Version.CURRENT); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), true, Version.V_2_1_0); + } + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId)); } /** * Tests that when async fetch returns that there is no data, the shard will not be allocated. */ public void testNoAsyncFetchData() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -85,11 +89,17 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. + * Tests when the node returns that no data was found for it (-1 for version and null for allocation id), + * it will be moved to ignore unassigned. */ public void testNoAllocationFound() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, -1); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "allocId"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_0); + } + testAllocator.addData(node1, -1, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -97,11 +107,43 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests when the node returns that no data was found for it (-1), it will be moved to ignore unassigned. + * Tests when the node returns data with a shard allocation id that does not match active allocation ids, it will be moved to ignore unassigned. 
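testNoMatchingAllocationIdFound and testNoActiveAllocationIds below pin down the new selection rule: on a current index, a node's shard copy only qualifies as the primary if its allocation ID appears in the active set recorded for that shard, while an empty active set marks a legacy index. A compressed sketch of the matching step, with made-up types rather than the real PrimaryShardAllocator:

---------------------------------------------------------------------------
import java.util.List;
import java.util.Optional;
import java.util.Set;

class AllocationIdMatchSketch {
    static class NodeShardState {
        final String nodeId;
        final String allocationId; // null for copies written before allocation ids existed
        NodeShardState(String nodeId, String allocationId) {
            this.nodeId = nodeId;
            this.allocationId = allocationId;
        }
    }

    // A copy is a valid primary candidate only if its allocation id is still in
    // the active set; "id1" against an active set of {"id2"} finds nothing, so
    // the shard stays unassigned, exactly as the test asserts.
    static Optional<NodeShardState> findMatchingCopy(List<NodeShardState> copies, Set<String> activeIds) {
        return copies.stream()
                .filter(copy -> copy.allocationId != null && activeIds.contains(copy.allocationId))
                .findFirst();
    }
}
---------------------------------------------------------------------------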
+ */ + public void testNoMatchingAllocationIdFound() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2"); + testAllocator.addData(node1, 1, "id1"); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); + assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); + } + + /** + * Tests that when there is a node to allocate the shard to, and there are no active allocation ids, it will be allocated to it. + * This is the case when we have old shards from pre-3.0 days. + */ + public void testNoActiveAllocationIds() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node1.id())); + } + + /** + * Tests when the node returns that no data was found for it, it will be moved to ignore unassigned. */ public void testStoreException() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 3, new CorruptIndexException("test", "test")); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1", new CorruptIndexException("test", "test")); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1); + testAllocator.addData(node1, 3, null, new CorruptIndexException("test", "test")); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -112,8 +154,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that when there is a node to allocate the shard to, it will be allocated to it. */ public void testFoundAllocationAndAllocating() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -126,8 +174,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * it will be moved to ignore unassigned until it can be allocated to. 
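The yesAllocationDeciders()/throttleAllocationDeciders()/noAllocationDeciders() fixtures used throughout correspond to the three verdicts an allocation decision can return, and the next few tests assert a different outcome for each. For an unassigned primary whose data copy has been found, the behavior they encode is roughly (assumed semantics, inferred from the assertions):

---------------------------------------------------------------------------
enum Verdict { YES, THROTTLE, NO }

class PrimaryAllocationRoundSketch {
    // One allocation round for an unassigned primary whose copy was found on a
    // node. YES and even NO lead to assignment (a primary holding the only data
    // copy must be forced), while THROTTLE defers the shard to a later round.
    static boolean assignsThisRound(Verdict verdict) {
        switch (verdict) {
            case YES:      return true;  // normal assignment, shard goes INITIALIZING
            case NO:       return true;  // forced: the found copy is the only usable data
            case THROTTLE: return false; // moved to "ignored", retried next round
            default:       throw new AssertionError();
        }
    }
}
---------------------------------------------------------------------------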
*/ public void testFoundAllocationButThrottlingDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); @@ -139,8 +193,14 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * force the allocation to it. */ public void testFoundAllocationButNoDecider() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders()); - testAllocator.addData(node1, 10); + final RoutingAllocation allocation; + if (randomBoolean()) { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1"); + testAllocator.addData(node1, 1, "allocId1"); + } else { + allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 3, null); + } boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -149,11 +209,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that the highest version node is chosed for allocation. + * Tests that the highest version node is chosen for allocation. */ - public void testAllocateToTheHighestVersion() { - RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders()); - testAllocator.addData(node1, 10).addData(node2, 12); + public void testAllocateToTheHighestVersionOnLegacyIndex() { + RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0); + testAllocator.addData(node1, 10, null).addData(node2, 12, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); @@ -162,35 +222,150 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { } /** - * Tests that when restoring from snapshot, even if we didn't find any node to allocate on, the shard - * will remain in the unassigned list to be allocated later. + * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. 
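testAllocateToTheHighestVersionOnLegacyIndex above keeps the pre-allocation-ID election: among nodes that report data for the shard, the copy with the highest on-disk version wins, so with versions 10 and 12 the test expects node2. A compact sketch of that comparison, again with hypothetical types:

---------------------------------------------------------------------------
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

class HighestVersionSketch {
    static class Copy {
        final String nodeId;
        final long version; // -1 means the node reported no data for the shard
        Copy(String nodeId, long version) {
            this.nodeId = nodeId;
            this.version = version;
        }
    }

    // Legacy (pre-allocation-id) election: among nodes that actually have
    // data, prefer the highest on-disk version so a stale copy never wins.
    static Optional<Copy> electPrimary(List<Copy> copies) {
        return copies.stream()
                .filter(copy -> copy.version != -1)
                .max(Comparator.comparingLong(copy -> copy.version));
    }
}
---------------------------------------------------------------------------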
*/ - public void testRestoreIgnoresNoNodesToAllocate() { - MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) - .build(); - ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) - .metaData(metaData) - .routingTable(routingTable) - .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); - RoutingAllocation allocation = new RoutingAllocation(yesAllocationDeciders(), state.getRoutingNodes(), state.nodes(), null, System.nanoTime()); + public void testRestore() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } - testAllocator.addData(node1, -1).addData(node2, -1); + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. + */ + public void testRestoreThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } + + /** + * Tests that when restoring from a snapshot and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. + */ + public void testRestoreForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "some allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when restoring from a snapshot and we don't find a node with a shard copy, the shard will remain in + * the unassigned list to be allocated later. 
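getRestoreRoutingAllocation below seeds the index metadata via putActiveAllocationIds(0, ...): a set containing "allocId" for a Version.CURRENT index and an empty set for a 2.x one, which is the signal that steers the allocator between the ID-based and version-based paths. A sketch of that bookkeeping, with a hypothetical container in place of IndexMetaData:

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hypothetical stand-in for the per-shard allocation-id tracking on IndexMetaData.
class ActiveAllocationIdsSketch {
    private final Map<Integer, Set<String>> idsByShard = new HashMap<>();

    void putActiveAllocationIds(int shardNumber, Set<String> ids) {
        idsByShard.put(shardNumber, new HashSet<>(ids));
    }

    Set<String> activeAllocationIds(int shardNumber) {
        return idsByShard.getOrDefault(shardNumber, Collections.emptySet());
    }

    // An empty active set means "no id history", e.g. an index created on 2.x,
    // so the allocator falls back to comparing on-disk shard versions.
    boolean useAllocationIdPath(int shardNumber) {
        return !activeAllocationIds(shardNumber).isEmpty();
    }
}
---------------------------------------------------------------------------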
+ */ + public void testRestoreDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); boolean changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0) + .putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), version, shardId.getIndex())) + .build(); + ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say yes, we allocate to that node. + */ + public void testRecoverOnAnyNode() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy and allocation + * deciders say throttle, we add it to ignored shards. + */ + public void testRecoverOnAnyNodeThrottle() { + RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we find a node with a shard copy but allocation + * deciders say no, we still allocate to that node. 
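The testRecoverOnAnyNode* group below exercises shared-filesystem indices, flagged by IndexMetaData.SETTING_SHARED_FILESYSTEM together with SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE; when both are set, a primary may be brought up on a node holding no local copy because the data lives on shared storage. A toy eligibility check under that assumption:

---------------------------------------------------------------------------
class RecoverOnAnyNodeSketch {
    // Mirrors the two index settings used in getRecoverOnAnyNodeRoutingAllocation.
    static boolean recoverOnAnyNode(boolean sharedFilesystem, boolean allowRecoveryOnAnyNode) {
        return sharedFilesystem && allowRecoveryOnAnyNode;
    }

    // On shared storage every data node is a candidate target; otherwise only
    // nodes that actually reported a copy of the shard qualify.
    static boolean isCandidate(boolean recoverOnAnyNode, boolean nodeHasLocalCopy) {
        return recoverOnAnyNode || nodeHasLocalCopy;
    }
}
---------------------------------------------------------------------------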
+ */ + public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders()); + testAllocator.addData(node1, 1, randomFrom(null, "allocId")); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(true)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); + } + + /** + * Tests that when recovering using "recover_on_any_node" and we don't find a node with a shard copy we let + * BalancedShardAllocator assign the shard + */ + public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() { + RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders()); + testAllocator.addData(node1, -1, null); + boolean changed = testAllocator.allocateUnassigned(allocation); + assertThat(changed, equalTo(false)); + assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); + assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); + } + + private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders) { + Version version = randomFrom(Version.CURRENT, Version.V_2_0_0); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version) + .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true) + .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, version == Version.CURRENT ? new HashSet<>(Arrays.asList("allocId")) : Collections.emptySet())) + .build(); + + RoutingTable routingTable = RoutingTable.builder() + .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndex())) + .build(); + ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) + .metaData(metaData) + .routingTable(routingTable) + .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); + return new RoutingAllocation(allocationDeciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } /** * Tests that only when enough copies of the shard exists we are going to allocate it. This test * verifies that with same version (1), and quorum allocation. 
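testEnoughCopiesFoundForAllocationOnLegacyIndex below walks a one-primary, two-replica index through successive fetch results: nothing is allocated while zero or one copies are known, and the primary initializes once a second copy reports in. That matches a quorum of copies / 2 + 1 over the three total copies; a one-liner to make the arithmetic explicit (the rule as inferred from the assertions, not quoted from the allocator):

---------------------------------------------------------------------------
class AllocationQuorumSketch {
    // primary + replicas = total copies; integer division keeps the floor.
    static int quorum(int numberOfReplicas) {
        int copies = 1 + numberOfReplicas; // 1 + 2 = 3 in the test
        return copies / 2 + 1;             // 3 / 2 + 1 = 2 copies required
    }

    public static void main(String[] args) {
        System.out.println(quorum(2)); // 2: allocation proceeds once two copies are found
    }
}
---------------------------------------------------------------------------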
*/ - public void testEnoughCopiesFoundForAllocation() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndex() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -207,7 +382,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ -215,7 +390,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 1); + testAllocator.addData(node2, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -229,9 +404,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { * Tests that only when enough copies of the shard exists we are going to allocate it. This test * verifies that even with different version, we treat different versions as a copy, and count them. 
*/ - public void testEnoughCopiesFoundForAllocationWithDifferentVersion() { + public void testEnoughCopiesFoundForAllocationOnLegacyIndexWithDifferentVersion() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.V_2_0_0)).numberOfShards(1).numberOfReplicas(2)) .build(); RoutingTable routingTable = RoutingTable.builder() .addAsRecovery(metaData.index(shardId.getIndex())) @@ -248,7 +423,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node1, 1); + testAllocator.addData(node1, 1, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(false)); @@ -256,7 +431,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(2)); // replicas - testAllocator.addData(node2, 2); + testAllocator.addData(node2, 2, null); allocation = new RoutingAllocation(yesAllocationDeciders(), new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); changed = testAllocator.allocateUnassigned(allocation); assertThat(changed, equalTo(true)); @@ -266,67 +441,20 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id())); } - public void testAllocationOnAnyNodeWithSharedFs() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>(); - data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 5)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, -1)); - AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches = - new AsyncShardFetch.FetchResult<>(shardId, data, new HashSet<>(), new HashSet<>()); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, new HashSet<String>(), fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(5L)); - assertThat(nAndV.nodes, contains(node2)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, new HashSet<String>(), fetches); - assertThat(nAndV.allocationsFound, equalTo(3)); - assertThat(nAndV.highestVersion, equalTo(5L)); - // All three nodes are potential candidates because shards can be recovered on any node - assertThat(nAndV.nodes, contains(node2, node1, node3)); - } - - public void testAllocationOnAnyNodeShouldPutNodesWithExceptionsLast() { - ShardRouting shard = TestShardRouting.newShardRouting("test", 0, null, null, null, false, - ShardRoutingState.UNASSIGNED, 0, - new
UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null)); - - Map<DiscoveryNode, TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> data = new HashMap<>(); - data.put(node1, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node1, 1)); - data.put(node2, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node2, 1)); - data.put(node3, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node3, 1, new IOException("I failed to open"))); - HashSet<String> ignoredNodes = new HashSet<>(); - ignoredNodes.add(node2.id()); - AsyncShardFetch.FetchResult<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetches = - new AsyncShardFetch.FetchResult<>(shardId, data, new HashSet<>(), ignoredNodes); - - PrimaryShardAllocator.NodesAndVersions nAndV = testAllocator.buildNodesAndVersions(shard, false, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(1)); - assertThat(nAndV.highestVersion, equalTo(1L)); - assertThat(nAndV.nodes, contains(node1)); - - nAndV = testAllocator.buildNodesAndVersions(shard, true, ignoredNodes, fetches); - assertThat(nAndV.allocationsFound, equalTo(2)); - assertThat(nAndV.highestVersion, equalTo(1L)); - // node3 should be last here - assertThat(nAndV.nodes.size(), equalTo(2)); - assertThat(nAndV.nodes, contains(node1, node3)); - } - - private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders) { + private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas(AllocationDeciders deciders, boolean asNew, Version version, String... activeAllocationIds) { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) - .build(); - RoutingTable routingTable = RoutingTable.builder() - .addAsRecovery(metaData.index(shardId.getIndex())) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(version)) + .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, new HashSet<>(Arrays.asList(activeAllocationIds)))) + .build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + if (asNew) { + routingTableBuilder.addAsNew(metaData.index(shardId.getIndex())); + } else { + routingTableBuilder.addAsRecovery(metaData.index(shardId.getIndex())); + } ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT) .metaData(metaData) - .routingTable(routingTable) + .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().put(node1).put(node2).put(node3)).build(); return new RoutingAllocation(deciders, new RoutingNodes(state, false), state.nodes(), null, System.nanoTime()); } @@ -344,15 +472,15 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { return this; } - public TestAllocator addData(DiscoveryNode node, long version) { - return addData(node, version, null); + public TestAllocator addData(DiscoveryNode node, long version, String allocationId) { + return addData(node, version, allocationId, null); } - public TestAllocator addData(DiscoveryNode node, long version, @Nullable Throwable storeException) { + public TestAllocator addData(DiscoveryNode node, long version, String allocationId, @Nullable Throwable storeException) { if (data == null) { data = new HashMap<>(); } - data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, storeException)); + data.put(node, new TransportNodesListGatewayStartedShards.NodeGatewayStartedShards(node, version, allocationId, storeException)); return this; } diff --git
a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java index edde1720474..a817b23949f 100644 --- a/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/QuorumGatewayIT.java @@ -20,10 +20,10 @@ package org.elasticsearch.gateway; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -32,14 +32,10 @@ import org.elasticsearch.test.InternalTestCluster.RestartCallback; import java.util.concurrent.TimeUnit; import static org.elasticsearch.client.Requests.clusterHealthRequest; -import static org.elasticsearch.common.settings.Settings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.notNullValue; /** * @@ -51,72 +47,12 @@ public class QuorumGatewayIT extends ESIntegTestCase { return 2; } - public void testChangeInitialShardsRecovery() throws Exception { - logger.info("--> starting 3 nodes"); - final String[] nodes = internalCluster().startNodesAsync(3).get().toArray(new String[0]); - - createIndex("test"); - ensureGreen(); - NumShards test = getNumShards("test"); - - logger.info("--> indexing..."); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - //We don't check for failures in the flush response: if we do we might get the following: - // FlushNotAllowedEngineException[[test][1] recovery is in progress, flush [COMMIT_TRANSLOG] is not allowed] - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("field", "value2").endObject()).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - - final String nodeToRemove = nodes[between(0,2)]; - logger.info("--> restarting 1 nodes -- kill 2"); - internalCluster().fullRestart(new RestartCallback() { - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - return Settings.EMPTY; - } - - @Override - public boolean doRestart(String nodeName) { - return nodeToRemove.equals(nodeName); - } - }); - if (randomBoolean()) { - Thread.sleep(between(1, 400)); // wait a bit and give is a chance to try to allocate - } - ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForNodes("1")).actionGet(); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.RED)); // nothing allocated yet - assertTrue(awaitBusy(() -> { - 
ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - return clusterStateResponse.getState() != null && clusterStateResponse.getState().routingTable().index("test") != null; - })); // wait until we get a cluster state - could be null if we quick enough. - final ClusterStateResponse clusterStateResponse = internalCluster().smartClient().admin().cluster().prepareState().setMasterNodeTimeout("500ms").get(); - assertThat(clusterStateResponse.getState(), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test"), notNullValue()); - assertThat(clusterStateResponse.getState().routingTable().index("test").allPrimaryShardsActive(), is(false)); - logger.info("--> change the recovery.initial_shards setting, and make sure its recovered"); - client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("recovery.initial_shards", 1)).get(); - - logger.info("--> running cluster_health (wait for the shards to startup), primaries only since we only have 1 node"); - clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForYellowStatus().waitForActiveShards(test.numPrimaries)).actionGet(); - logger.info("--> done cluster_health, status " + clusterHealth.getStatus()); - assertThat(clusterHealth.isTimedOut(), equalTo(false)); - assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW)); - - for (int i = 0; i < 10; i++) { - assertHitCount(client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get(), 2l); - } - } - public void testQuorumRecovery() throws Exception { logger.info("--> starting 3 nodes"); - internalCluster().startNodesAsync(3).get(); // we are shutting down nodes - make sure we don't have 2 clusters if we test network - setMinimumMasterNodes(2); + internalCluster().startNodesAsync(3, + Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2).build()).get(); + createIndex("test"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java index 2184fda47c4..dbdf747de63 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryBackwardsCompatibilityIT.java @@ -82,9 +82,9 @@ public class RecoveryBackwardsCompatibilityIT extends ESBackcompatTestCase { SearchResponse countResponse = client().prepareSearch().setSize(0).get(); assertHitCount(countResponse, numDocs); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")).execute().actionGet(); backwardsCluster().upgradeAllNodes(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all")).execute().actionGet(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")).execute().actionGet(); ensureGreen(); countResponse = 
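Both hunks above follow the settings migration that recurs through the rest of this patch: raw key constants such as CLUSTER_ROUTING_ALLOCATION_ENABLE give way to typed Setting constants (CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING, DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING), and call sites obtain the key via getKey(). The patch only shows the call sites; the declaration side presumably looks roughly like the following sketch, where the constructor shape is an assumption and only the key string and the getKey() accessor are confirmed by the patch:

---------------------------------------------------------------------------
// Assumed shape of the typed-setting declaration -- not shown in this patch.
public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING =
        new Setting<>("cluster.routing.allocation.enable",  // the former raw key
                Allocation.ALL.name(), Allocation::parse,   // default value and typed parser
                true, Setting.Scope.CLUSTER);               // dynamic, cluster-scoped

// Call sites stop duplicating the raw string:
settingsBuilder()
        .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none");
---------------------------------------------------------------------------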
client().prepareSearch().setSize(0).get(); diff --git a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java index 01c76b465a9..1a95b66817a 100644 --- a/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java +++ b/core/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayIT.java @@ -360,7 +360,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); @@ -369,7 +369,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { ensureGreen(); } else { logger.info("--> trying to sync flush"); - assertEquals(SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").failedShards(), 0); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); assertSyncIdsNotNull(); } @@ -377,7 +377,7 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase { // Disable allocations while we are closing nodes client().admin().cluster().prepareUpdateSettings() .setTransientSettings(settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE)) + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) .get(); logger.info("--> full cluster restart"); internalCluster().fullRestart(); diff --git a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 9a053b36527..0818999ea7e 100644 --- a/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/core/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -43,9 +43,11 @@ import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData; import org.elasticsearch.test.ESAllocationTestCase; import org.junit.Before; +import java.util.Arrays; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -275,13 +277,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation onePrimaryOnNode1And1Replica(AllocationDeciders deciders, Settings settings, UnassignedInfo.Reason reason) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)).numberOfShards(1).numberOfReplicas(0)) - .build(); + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT).put(settings)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) + .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) .addIndexShard(new 
IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(ShardRouting.newUnassigned(shardId.getIndex(), shardId.getId(), null, false, new UnassignedInfo(reason, null))) .build()) ) @@ -294,13 +299,16 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { } private RoutingAllocation onePrimaryOnNode1And1ReplicaRecovering(AllocationDeciders deciders) { + ShardRouting primaryShard = TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10); MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(0)) + .put(IndexMetaData.builder(shardId.getIndex()).settings(settings(Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(1) + .putActiveAllocationIds(0, new HashSet<>(Arrays.asList(primaryShard.allocationId().getId())))) .build(); RoutingTable routingTable = RoutingTable.builder() .add(IndexRoutingTable.builder(shardId.getIndex()) .addIndexShard(new IndexShardRoutingTable.Builder(shardId) - .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node1.id(), true, ShardRoutingState.STARTED, 10)) + .addShard(primaryShard) .addShard(TestShardRouting.newShardRouting(shardId.getIndex(), shardId.getId(), node2.id(), null, null, false, ShardRoutingState.INITIALIZING, 10, new UnassignedInfo(UnassignedInfo.Reason.CLUSTER_RECOVERED, null))) .build()) ) diff --git a/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java new file mode 100644 index 00000000000..936a6fa09a0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/gateway/ReusePeerRecoverySharedTest.java @@ -0,0 +1,161 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.gateway; + +import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; +import org.elasticsearch.action.admin.indices.stats.IndexStats; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.indices.flush.SyncedFlushUtil; +import org.elasticsearch.indices.recovery.RecoveryState; + +import static org.elasticsearch.common.settings.Settings.settingsBuilder; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESIntegTestCase.internalCluster; +import static org.elasticsearch.test.ESTestCase.randomBoolean; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; + +/** + * Test of file reuse on recovery shared between integration tests and backwards + * compatibility tests. + */ +public class ReusePeerRecoverySharedTest { + /** + * Test file reuse on peer recovery. This is shared between RecoveryFromGatewayIT + * and RecoveryBackwardsCompatibilityIT. + * + * @param indexSettings + * settings for the index to test + * @param restartCluster + * runnable that will restart the cluster under test + * @param logger + * logger for logging + * @param useSyncIds + * should this use synced flush? can't use synced flush in the bwc + * tests + */ + public static void testCase(Settings indexSettings, Runnable restartCluster, ESLogger logger, boolean useSyncIds) { + /* + * prevent any rebalance actions during the peer recovery; if we run into + * a relocation, the reuse count will be 0 and this fails the test. We + * are testing here if we reuse the files on disk after full restarts + * for replicas.
+ */ + assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put(indexSettings) + .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE))); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + logger.info("--> indexing docs"); + for (int i = 0; i < 1000; i++) { + client().prepareIndex("test", "type").setSource("field", "value").execute().actionGet(); + if ((i % 200) == 0) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + } + if (randomBoolean()) { + client().admin().indices().prepareFlush().execute().actionGet(); + } + logger.info("--> running cluster health"); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + // just wait for merges + client().admin().indices().prepareForceMerge("test").setMaxNumSegments(100).get(); + client().admin().indices().prepareFlush().setWaitIfOngoing(true).setForce(true).get(); + + if (useSyncIds == false) { + logger.info("--> disabling allocation while the cluster is shut down"); + + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)).get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after first shutdown"); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + } else { + logger.info("--> trying to sync flush"); + assertEquals(client().admin().indices().prepareSyncedFlush("test").get().failedShards(), 0); + assertSyncIdsNotNull(); + } + + logger.info("--> disabling allocation while the cluster is shut down{}", useSyncIds ? "" : " a second time"); + // Disable allocations while we are closing nodes + client().admin().cluster().prepareUpdateSettings().setTransientSettings( + settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), EnableAllocationDecider.Allocation.NONE)) + .get(); + logger.info("--> full cluster restart"); + restartCluster.run(); + + logger.info("--> waiting for cluster to return to green after {}shutdown", useSyncIds ?
"" : "second "); + client().admin().cluster().prepareHealth().setWaitForGreenStatus().setTimeout("30s").get(); + + if (useSyncIds) { + assertSyncIdsNotNull(); + } + RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries("test").get(); + for (RecoveryState recoveryState : recoveryResponse.shardRecoveryStates().get("test")) { + long recovered = 0; + for (RecoveryState.File file : recoveryState.getIndex().fileDetails()) { + if (file.name().startsWith("segments")) { + recovered += file.length(); + } + } + if (!recoveryState.getPrimary() && (useSyncIds == false)) { + logger.info("--> replica shard {} recovered from {} to {}, recovered {}, reuse {}", recoveryState.getShardId().getId(), + recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + assertThat("no bytes should be recovered", recoveryState.getIndex().recoveredBytes(), equalTo(recovered)); + assertThat("data should have been reused", recoveryState.getIndex().reusedBytes(), greaterThan(0l)); + // we have to recover the segments file since we commit the translog ID on engine startup + assertThat("all bytes should be reused except of the segments file", recoveryState.getIndex().reusedBytes(), + equalTo(recoveryState.getIndex().totalBytes() - recovered)); + assertThat("no files should be recovered except of the segments file", recoveryState.getIndex().recoveredFileCount(), + equalTo(1)); + assertThat("all files should be reused except of the segments file", recoveryState.getIndex().reusedFileCount(), + equalTo(recoveryState.getIndex().totalFileCount() - 1)); + assertThat("> 0 files should be reused", recoveryState.getIndex().reusedFileCount(), greaterThan(0)); + } else { + if (useSyncIds && !recoveryState.getPrimary()) { + logger.info("--> replica shard {} recovered from {} to {} using sync id, recovered {}, reuse {}", + recoveryState.getShardId().getId(), recoveryState.getSourceNode().name(), recoveryState.getTargetNode().name(), + recoveryState.getIndex().recoveredBytes(), recoveryState.getIndex().reusedBytes()); + } + assertThat(recoveryState.getIndex().recoveredBytes(), equalTo(0l)); + assertThat(recoveryState.getIndex().reusedBytes(), equalTo(recoveryState.getIndex().totalBytes())); + assertThat(recoveryState.getIndex().recoveredFileCount(), equalTo(0)); + assertThat(recoveryState.getIndex().reusedFileCount(), equalTo(recoveryState.getIndex().totalFileCount())); + } + } + } + + public static void assertSyncIdsNotNull() { + IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test"); + for (ShardStats shardStats : indexStats.getShards()) { + assertNotNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID)); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java index 21ecdf710b7..457cf31ec83 100644 --- a/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java +++ b/core/src/test/java/org/elasticsearch/index/TransportIndexFailuresIT.java @@ -56,7 +56,7 @@ public class TransportIndexFailuresIT extends ESIntegTestCase { .put("discovery.type", "zen") // <-- To override the local setting if set externally .put(FaultDetection.SETTING_PING_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly .put(FaultDetection.SETTING_PING_RETRIES, "1") // <-- for hitting simulated network failures quickly - 
.put(DiscoverySettings.PUBLISH_TIMEOUT, "1s") // <-- for hitting simulated network failures quickly + .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") // <-- for hitting simulated network failures quickly .put("discovery.zen.minimum_master_nodes", 1) .build(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java index bfb7ead734d..bbba3432b66 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/all/SimpleAllMapperTests.java @@ -49,21 +49,12 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; +import java.util.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.StreamsUtils.copyToBytesFromClasspath; import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasItem; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.*; public class SimpleAllMapperTests extends ESSingleNodeTestCase { @@ -251,7 +242,7 @@ public class SimpleAllMapperTests extends ESSingleNodeTestCase { if (randomBoolean()) { booleanOptionList.add(new Tuple<>("store_term_vector_payloads", tv_payloads = randomBoolean())); } - Collections.shuffle(booleanOptionList, getRandom()); + Collections.shuffle(booleanOptionList, random()); for (Tuple option : booleanOptionList) { mappingBuilder.field(option.v1(), option.v2().booleanValue()); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java index d94ae2b6735..2fe0cf9f218 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java @@ -31,7 +31,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; @@ -39,7 +38,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; import java.util.List; import java.util.Map; @@ -321,9 +319,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase { DocumentMapper docMapperAfter = parser.parse(mappingAfter); - MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true, false); - - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapperBefore.merge(docMapperAfter.mapping(), true, false); docMapperBefore.merge(docMapperAfter.mapping(), false, 
false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java index b1224d5c6c7..1cb41480cb7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java @@ -19,15 +19,10 @@ package org.elasticsearch.index.mapper.core; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.CannedTokenStream; -import org.apache.lucene.analysis.MockTokenizer; -import org.apache.lucene.analysis.Token; -import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.*; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; @@ -64,13 +59,11 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { .endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword")); - mergeResult = stage1.merge(stage2.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, false); // Just simulated so merge hasn't happened yet assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard")); } @@ -85,7 +78,7 @@ public class TokenCountFieldMapperTests extends ESSingleNodeTestCase { t2.setPositionIncrement(2); // Count funny tokens with more than one increment int finalTokenIncrement = 4; // Count the final token increment on the rare token streams that have them Token[] tokens = new Token[] {t1, t2, t3}; - Collections.shuffle(Arrays.asList(tokens), getRandom()); + Collections.shuffle(Arrays.asList(tokens), random()); final TokenStream tokenStream = new CannedTokenStream(finalTokenIncrement, 0, tokens); // TODO: we have no CannedAnalyzer? 
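The TokenCountFieldMapperTests hunk above shows the behavioral change behind most of the remaining mapper-test edits: DocumentMapper#merge no longer returns a MergeResult to inspect for conflicts; a conflicting merge throws IllegalArgumentException, and a clean merge simply returns. The two assertion shapes used throughout the patch are therefore:

---------------------------------------------------------------------------
// Success: just call merge (simulate = true, updateAllTypes = false);
// any conflict would have thrown.
stage1.merge(stage2.mapping(), true, false);

// Conflict: the exception message now carries what buildConflicts() used to.
try {
    objectMapper.merge(nestedMapper.mapping(), true, false);
    fail("expected a merge conflict");
} catch (IllegalArgumentException e) {
    assertThat(e.getMessage(),
            containsString("object mapping [obj] can't be changed from non-nested to nested"));
}
---------------------------------------------------------------------------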
Analyzer analyzer = new Analyzer() { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java index fb67401e334..4772958bdb7 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java @@ -371,9 +371,8 @@ public class SimpleDateMappingTests extends ESSingleNodeTestCase { Map config = getConfigurationViaXContent(initialDateFieldMapper); assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy")); - MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false, false); + defaultMapper.merge(mergeMapper.mapping(), false, false); - assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false)); assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class))); DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field"); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java index e5d08db8d9f..6f7541a272a 100755 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BinaryFieldMapper; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -82,7 +81,7 @@ public class ExternalMapper extends FieldMapper { private String mapperName; public Builder(String name, String generatedValue, String mapperName) { - super(name, new ExternalFieldType()); + super(name, new ExternalFieldType(), new ExternalFieldType()); this.builder = this; this.stringBuilder = stringField(name).store(false); this.generatedValue = generatedValue; @@ -96,9 +95,6 @@ public class ExternalMapper extends FieldMapper { @Override public ExternalMapper build(BuilderContext context) { - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(ContentPath.Type.FULL); - context.path().add(name); BinaryFieldMapper binMapper = binBuilder.build(context); BooleanFieldMapper boolMapper = boolBuilder.build(context); @@ -108,7 +104,6 @@ public class ExternalMapper extends FieldMapper { FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context); context.path().remove(); - context.path().pathType(origPathType); setupFieldType(context); return new ExternalMapper(name, fieldType, generatedValue, mapperName, binMapper, boolMapper, pointMapper, shapeMapper, stringMapper, @@ -219,7 +214,7 @@ public class ExternalMapper extends FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + protected void doMerge(Mapper mergeWith, boolean updateAllTypes) { // ignore this for now } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java 
b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java index dae8bc67fda..8bdb5670dbb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMetadataMapper.java @@ -28,7 +28,6 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MetadataFieldMapper; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -66,9 +65,9 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeResult mergeResult) { + public void doMerge(Mapper mergeWith, boolean updateAllTypes) { if (!(mergeWith instanceof ExternalMetadataMapper)) { - mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this); + throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this); } } @@ -99,7 +98,7 @@ public class ExternalMetadataMapper extends MetadataFieldMapper { public static class Builder extends MetadataFieldMapper.Builder { protected Builder() { - super(CONTENT_TYPE, FIELD_TYPE); + super(CONTENT_TYPE, FIELD_TYPE, FIELD_TYPE); } @Override diff --git a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java index 4cf7b405217..7e519c3b722 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalValuesMapperIntegrationIT.java @@ -87,7 +87,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { .startObject("f") .field("type", ExternalMapperPlugin.EXTERNAL_UPPER) .startObject("fields") - .startObject("f") + .startObject("g") .field("type", "string") .field("store", "yes") .startObject("fields") @@ -107,7 +107,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase { refresh(); SearchResponse response = client().prepareSearch("test-idx") - .setQuery(QueryBuilders.termQuery("f.f.raw", "FOO BAR")) + .setQuery(QueryBuilders.termQuery("f.g.raw", "FOO BAR")) .execute().actionGet(); assertThat(response.getHits().totalHits(), equalTo((long) 1)); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java index 93fd71599c4..4efa12fca00 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.test.ESSingleNodeTestCase; diff --git 
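On the mapper-implementation side, the ExternalMapper and ExternalMetadataMapper hunks above show the corresponding API change: custom mappers no longer override merge(Mapper, MergeResult) but the narrower doMerge hook (presumably invoked by a merge implementation in the base class), and they signal incompatibilities by throwing instead of calling mergeResult.addConflict:

---------------------------------------------------------------------------
// Shape of the new hook, as used by ExternalMetadataMapper above.
@Override
public void doMerge(Mapper mergeWith, boolean updateAllTypes) {
    if (!(mergeWith instanceof ExternalMetadataMapper)) {
        throw new IllegalArgumentException("Trying to merge " + mergeWith + " with " + this);
    }
    // compatible with ourselves: nothing further to merge for this test mapper
}
---------------------------------------------------------------------------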
a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index 54e9e96f8ad..596efdcc273 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -30,17 +30,13 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ESSingleNodeTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.isIn; public class GeoShapeFieldMapperTests extends ESSingleNodeTestCase { public void testDefaultConfiguration() throws IOException { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index 3f3c5702e8c..f97b22e0ecb 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -204,7 +204,7 @@ public class FieldNamesFieldMapperTests extends ESSingleNodeTestCase { @Override public Builder parse(String name, Map node, ParserContext parserContext) throws MapperParsingException { - return new MetadataFieldMapper.Builder("_dummy", FIELD_TYPE) { + return new MetadataFieldMapper.Builder("_dummy", FIELD_TYPE, FIELD_TYPE) { @Override public DummyMetadataFieldMapper build(BuilderContext context) { return new DummyMetadataFieldMapper(context.indexSettings()); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index 1a66879c448..b2faf44e657 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; @@ -39,6 +38,7 @@ import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -59,15 +59,12 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - MergeResult mergeResult = stage1.merge(stage2.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), 
equalTo(false)); + stage1.merge(stage2.mapping(), true, false); // since we are simulating, we should not have the age mapping assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue()); // now merge, don't simulate - mergeResult = stage1.merge(stage2.mapping(), false, false); - // there is still merge failures - assertThat(mergeResult.hasConflicts(), equalTo(false)); + stage1.merge(stage2.mapping(), false, false); // but we have the age in assertThat(stage1.mappers().smartNameFieldMapper("age"), notNullValue()); assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), notNullValue()); @@ -83,8 +80,7 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping); assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); - MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); + mapper.merge(withDynamicMapper.mapping(), false, false); assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE)); } @@ -99,14 +95,19 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { .endObject().endObject().endObject().string(); DocumentMapper nestedMapper = parser.parse(nestedMapping); - MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true, false); - assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested")); + try { + objectMapper.merge(nestedMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from non-nested to nested")); + } - mergeResult = nestedMapper.merge(objectMapper.mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(1)); - assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); + try { + nestedMapper.merge(objectMapper.mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("object mapping [obj] can't be changed from nested to non-nested")); + } } public void testMergeSearchAnalyzer() throws Exception { @@ -122,9 +123,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("keyword")); } @@ -141,9 +141,8 @@ public class TestMergeMapperTests extends ESSingleNodeTestCase { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("whitespace")); - MergeResult mergeResult = existing.merge(changed.mapping(), false, false); + existing.merge(changed.mapping(), false, false); - assertThat(mergeResult.hasConflicts(), equalTo(false)); 
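Several hunks that follow (DefaultSourceMappingTests#assertConflicts, TimestampMappingTests#assertConflict, the TTLMappingTests cases) repeat the same try/fail/catch block; they could converge on a single helper, sketched here as a hypothetical consolidation that is not part of the patch:

---------------------------------------------------------------------------
// Hypothetical shared helper: assert that a merge fails and that the
// exception message names every expected conflict.
static void assertMergeConflicts(DocumentMapper docMapper, Mapping mergeWith, String... conflicts) {
    try {
        docMapper.merge(mergeWith, true, false);
        fail("expected merge conflicts " + Arrays.toString(conflicts));
    } catch (IllegalArgumentException e) {
        for (String conflict : conflicts) {
            assertThat(e.getMessage(), containsString(conflict));
        }
    }
}
---------------------------------------------------------------------------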
assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").fieldType().searchAnalyzer()).name(), equalTo("standard")); assertThat(((StringFieldMapper) (existing.mappers().getMapper("field"))).getIgnoreAbove(), equalTo(14)); } diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index a5a073d147f..58fa8fd69b0 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -479,7 +479,7 @@ public class MultiFieldTests extends ESSingleNodeTestCase { .startObject("my_field").field("type", "string").startObject("fields").startObject(MY_MULTI_FIELD) .field("type", "string").startObject("fielddata"); String[] keys = possibleSettings.keySet().toArray(new String[]{}); - Collections.shuffle(Arrays.asList(keys)); + Collections.shuffle(Arrays.asList(keys), random()); for(int i = randomIntBetween(0, possibleSettings.size()-1); i >= 0; --i) builder.field(keys[i], possibleSettings.get(keys[i])); builder.endObject().endObject().endObject().endObject().endObject().endObject().endObject(); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 30890dcd22a..83e10bd826c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -27,15 +27,11 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.test.ESSingleNodeTestCase; -import java.util.Arrays; - import static org.elasticsearch.test.StreamsUtils.copyToStringFromClasspath; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -62,8 +58,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); DocumentMapper docMapper2 = parser.parse(mapping); - MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper2.mapping(), true, false); docMapper.merge(docMapper2.mapping(), false, false); @@ -84,8 +79,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper3.mapping(), true, false); docMapper.merge(docMapper3.mapping(), false, false); @@ -100,8 +94,7 @@ public class JavaMultiFieldMergeTests extends ESSingleNodeTestCase 
{ mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping4.json"); DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), true, false); - assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); + docMapper.merge(docMapper4.mapping(), true, false); docMapper.merge(docMapper4.mapping(), false, false); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java deleted file mode 100644 index 7c1875be550..00000000000 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/CompressSourceMappingTests.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.mapper.source; - -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.ParsedDocument; -import org.elasticsearch.test.ESSingleNodeTestCase; - -import static org.hamcrest.Matchers.equalTo; - -/** - * - */ -public class CompressSourceMappingTests extends ESSingleNodeTestCase { - Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - - public void testCompressDisabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", false).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - } - - public void testCompressEnabled() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress", true).endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = 
documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } - - public void testCompressThreshold() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("compress_threshold", "200b").endObject() - .endObject().endObject().string(); - - DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); - - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .endObject().bytes()); - - BytesRef bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(false)); - - doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field1", "value1") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .field("field2", "value2 xxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyy zzzzzzzzzzzzzzzzz") - .endObject().bytes()); - - bytes = doc.rootDoc().getBinaryValue("_source"); - assertThat(CompressorFactory.isCompressed(new BytesArray(bytes)), equalTo(true)); - } -} diff --git a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index 4ec0ff5211e..c30ea9bc6c6 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -31,13 +31,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.*; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.VersionUtils; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; public class DefaultSourceMappingTests extends ESSingleNodeTestCase { @@ -63,51 +62,16 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - public void testJsonFormat() throws Exception { + public void testFormatBackCompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() .endObject().endObject().string(); + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.V_2_2_0)) + .build(); - DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - 
.endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); - } - - public void testJsonFormatCompressedBackcompat() throws Exception { - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("format", "json").field("compress", true).endObject() - .endObject().endObject().string(); - - Settings backcompatSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); - DocumentMapperParser parser = createIndex("test", backcompatSettings).mapperService().documentMapperParser(); - DocumentMapper documentMapper = parser.parse(mapping); - ParsedDocument doc = documentMapper.parse("test", "type", "1", XContentFactory.jsonBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - byte[] uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); - - documentMapper = parser.parse(mapping); - doc = documentMapper.parse("test", "type", "1", XContentFactory.smileBuilder().startObject() - .field("field", "value") - .endObject().bytes()); - - assertThat(CompressorFactory.isCompressed(doc.source()), equalTo(true)); - uncompressed = CompressorFactory.uncompressIfNeeded(doc.source()).toBytes(); - assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); + DocumentMapperParser parser = createIndex("test", settings).mapperService().documentMapperParser(); + parser.parse(mapping); // no exception } public void testIncludes() throws Exception { @@ -228,13 +192,18 @@ public class DefaultSourceMappingTests extends ESSingleNodeTestCase { void assertConflicts(String mapping1, String mapping2, DocumentMapperParser parser, String... 
conflicts) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - - List expectedConflicts = new ArrayList<>(Arrays.asList(conflicts)); - for (String conflict : mergeResult.buildConflicts()) { - assertTrue("found unexpected conflict [" + conflict + "]", expectedConflicts.remove(conflict)); + if (conflicts.length == 0) { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + } else { + try { + docMapper.merge(parser.parse(mapping2).mapping(), true, false); + fail(); + } catch (IllegalArgumentException e) { + for (String conflict : conflicts) { + assertThat(e.getMessage(), containsString(conflict)); + } + } } - assertTrue("missing conflicts: " + Arrays.toString(expectedConflicts.toArray()), expectedConflicts.isEmpty()); } public void testEnabledNotUpdateable() throws Exception { diff --git a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 9ac039a49fb..cadd9dd673c 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -40,7 +40,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.StringFieldMapper; @@ -493,8 +492,7 @@ public class SimpleStringMappingTests extends ESSingleNodeTestCase { String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject() .endObject().endObject().endObject().endObject().string(); - MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); - assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts()); + defaultMapper.merge(parser.parse(updatedMapping).mapping(), false, false); doc = defaultMapper.parse("test", "type", "1", XContentFactory.jsonBuilder() .startObject() diff --git a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 53a3bf7bb6e..d545452db0f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; @@ -515,8 +514,7 @@ public class TimestampMappingTests extends 
ESSingleNodeTestCase { .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false, false); - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + docMapper.merge(parser.parse(mapping).mapping(), false, false); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getLoading(), equalTo(MappedFieldType.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldType().fieldDataType().getFormat(indexSettings), equalTo("array")); } @@ -618,9 +616,9 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .field("index", indexValues.remove(randomInt(2))) .endObject() .endObject().endObject().string(); - DocumentMapperParser parser = createIndex("test", BWC_SETTINGS).mapperService().documentMapperParser(); + MapperService mapperService = createIndex("test", BWC_SETTINGS).mapperService(); - DocumentMapper docMapper = parser.parse(mapping); + mapperService.merge("type", new CompressedXContent(mapping), true, false); mapping = XContentFactory.jsonBuilder().startObject() .startObject("type") .startObject("_timestamp") @@ -628,18 +626,11 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { .endObject() .endObject().endObject().string(); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true, false); - List expectedConflicts = new ArrayList<>(); - expectedConflicts.add("mapper [_timestamp] has different [index] values"); - expectedConflicts.add("mapper [_timestamp] has different [tokenize] values"); - if (indexValues.get(0).equals("not_analyzed") == false) { - // if the only index value left is not_analyzed, then the doc values setting will be the same, but in the - // other two cases, it will change - expectedConflicts.add("mapper [_timestamp] has different [doc_values] values"); - } - - for (String conflict : mergeResult.buildConflicts()) { - assertThat(conflict, isIn(expectedConflicts)); + try { + mapperService.merge("type", new CompressedXContent(mapping), false, false); + fail(); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("mapper [_timestamp] has different [index] values")); } } @@ -686,10 +677,15 @@ public class TimestampMappingTests extends ESSingleNodeTestCase { void assertConflict(String mapping1, String mapping2, DocumentMapperParser parser, String conflict) throws IOException { DocumentMapper docMapper = parser.parse(mapping1); docMapper = parser.parse(docMapper.mappingSource().string()); - MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true, false); - assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 
-        if (conflict != null) {
-            assertThat(mergeResult.buildConflicts()[0], containsString(conflict));
+        if (conflict == null) {
+            docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+        } else {
+            try {
+                docMapper.merge(parser.parse(mapping2).mapping(), true, false);
+                fail();
+            } catch (IllegalArgumentException e) {
+                assertThat(e.getMessage(), containsString(conflict));
+            }
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
index efe07615532..444d692079a 100644
--- a/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
+++ b/core/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java
@@ -35,7 +35,6 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.SourceToParse;
 import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
@@ -116,9 +115,8 @@ public class TTLMappingTests extends ESSingleNodeTestCase {
         DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl);
         DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl);

-        MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, false);
+        mapperWithoutTtl.merge(mapperWithTtl.mapping(), false, false);

-        assertThat(mergeResult.hasConflicts(), equalTo(false));
         assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true));
     }
@@ -141,9 +139,8 @@
         DocumentMapper initialMapper = parser.parse(mappingWithTtl);
         DocumentMapper updatedMapper = parser.parse(updatedMapping);

-        MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false);
+        initialMapper.merge(updatedMapper.mapping(), true, false);

-        assertThat(mergeResult.hasConflicts(), equalTo(false));
         assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
     }
@@ -154,9 +151,13 @@
         DocumentMapper initialMapper = parser.parse(mappingWithTtl);
         DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled);

-        MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true, false);
+        try {
+            initialMapper.merge(updatedMapper.mapping(), true, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+        }

-        assertThat(mergeResult.hasConflicts(), equalTo(true));
         assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true));
     }
@@ -189,23 +190,20 @@ public class TTLMappingTests extends ESSingleNodeTestCase {
     public void testNoConflictIfNothingSetAndDisabledLater() throws Exception {
         IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
         XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d");
-        MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean(), false);
     }

     public void testNoConflictIfNothingSetAndEnabledLater() throws Exception {
         IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type");
         XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
-        MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean(), false);
     }

     public void testMergeWithOnlyDefaultSet() throws Exception {
         XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
         IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtlEnabled);
         XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
-        MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false);
         CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
     }
@@ -216,8 +214,7 @@
         CompressedXContent mappingAfterCreation = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterCreation, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
         XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m");
-        MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithOnlyDefaultSet.string()), true).mapping(), false, false);
         CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
     }
@@ -228,8 +225,7 @@
         IndexService indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithTtl);
         CompressedXContent mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
         XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d");
-        MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlDifferentDefault.string()), true).mapping(), true, false);
         // make sure simulate flag actually worked - no mappings applied
         CompressedXContent mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
@@ -240,8 +236,7 @@
         indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
         mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
         XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled();
-        mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false);
         // make sure simulate flag actually worked - no mappings applied
         mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
@@ -252,8 +247,7 @@
         indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
         mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource();
         mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
-        mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), true, false);
         // make sure simulate flag actually worked - no mappings applied
         mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(mappingBeforeMerge));
@@ -263,8 +257,7 @@
         mappingWithoutTtl = getMappingWithTtlDisabled("6d");
         indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type", mappingWithoutTtl);
         mappingWithTtlEnabled = getMappingWithTtlEnabled("7d");
-        mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false);
-        assertFalse(mergeResult.hasConflicts());
+        indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false);
         // make sure simulate flag actually worked - mappings applied
         mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}")));
CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -273,8 +266,7 @@ public class TTLMappingTests extends ESSingleNodeTestCase { // check if switching simulate flag off works if nothing was applied in the beginning indexService = createIndex("testindex", Settings.settingsBuilder().build(), "type"); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); - assertFalse(mergeResult.hasConflicts()); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingWithTtlEnabled.string()), true).mapping(), false, false); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterMerge, equalTo(new CompressedXContent("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":604800000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); diff --git a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index abf5f4819cd..f73ad3e3b3f 100644 --- a/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/core/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.core.LongFieldMapper; import org.elasticsearch.test.ESSingleNodeTestCase; @@ -77,9 +76,7 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); - // assure we have no conflicts - assertThat(mergeResult.buildConflicts().length, equalTo(0)); + indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), false, false); // make sure mappings applied CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string())); @@ -101,9 +98,12 @@ public class UpdateMappingTests extends ESSingleNodeTestCase { IndexService indexService = createIndex("test", Settings.settingsBuilder().build(), "type", mapping); CompressedXContent mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping - MergeResult mergeResult = 
-        // assure we have conflicts
-        assertThat(mergeResult.buildConflicts().length, equalTo(1));
+        try {
+            indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedXContent(mappingUpdate.bytes()), true).mapping(), true, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            // expected
+        }
         // make sure simulate flag actually worked - no mappings applied
         CompressedXContent mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource();
         assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate));
@@ -202,6 +202,51 @@
         assertNull(mapperService.documentMapper("type2").mapping().root().getMapper("foo"));
     }

+    public void testReuseMetaField() throws IOException {
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("_id").field("type", "string").endObject()
+                .endObject().endObject().endObject();
+        MapperService mapperService = createIndex("test", Settings.settingsBuilder().build()).mapperService();
+
+        try {
+            mapperService.merge("type", new CompressedXContent(mapping.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
+        }
+
+        try {
+            mapperService.merge("type", new CompressedXContent(mapping.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
+        }
+    }
+
+    public void testReuseMetaFieldBackCompat() throws IOException {
+        XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+                .startObject("properties").startObject("_id").field("type", "string").endObject()
+                .endObject().endObject().endObject();
+        // the logic is different for 2.x indices since they record some meta mappers (including _id)
+        // in the root object
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_2_1_0).build();
+        MapperService mapperService = createIndex("test", settings).mapperService();
+
+        try {
+            mapperService.merge("type", new CompressedXContent(mapping.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
+        }
+
+        try {
+            mapperService.merge("type", new CompressedXContent(mapping.string()), false, false);
+            fail();
+        } catch (IllegalArgumentException e) {
+            assertTrue(e.getMessage().contains("Field [_id] is defined twice in [type]"));
+        }
+    }
+
     public void testIndexFieldParsingBackcompat() throws IOException {
         IndexService indexService = createIndex("test", Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build());
         XContentBuilder indexMapping = XContentFactory.jsonBuilder();
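The hunks above all make the same API move: DocumentMapper#merge and MapperService#merge no longer return a MergeResult whose conflicts must be collected, they throw IllegalArgumentException on a conflicting update. A minimal sketch of the pattern the updated tests rely on (the conflicting mapping string is a hypothetical placeholder):

---------------------------------------------------------------------------
// Sketch only: assumes a MapperService that already holds a "type" mapping,
// as in the tests above, plus a second, conflicting mapping update.
try {
    mapperService.merge("type", new CompressedXContent(conflictingMapping), false, false);
    fail("expected the conflicting merge to be rejected");
} catch (IllegalArgumentException e) {
    // conflicts are now reported through the exception message
    // instead of MergeResult#buildConflicts()
    assertThat(e.getMessage(), containsString("has different [index] values"));
}
---------------------------------------------------------------------------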
diff --git a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
index aa97d722737..2d250ff0b95 100644
--- a/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
+++ b/core/src/test/java/org/elasticsearch/index/query/AbstractQueryTestCase.java
@@ -47,6 +47,7 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.compress.CompressedXContent;
+import org.elasticsearch.common.geo.builders.ShapeBuilderRegistry;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.inject.ModulesBuilder;
@@ -191,6 +192,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
                     // skip services
                     bindQueryParsersExtension();
                     bindMapperExtension();
+                    bind(ShapeBuilderRegistry.class).asEagerSingleton();
                 }
             },
             new ScriptModule(settings) {
diff --git a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
index 6b2088d25c6..0720303940a 100644
--- a/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/GeoShapeQueryBuilderTests.java
@@ -59,14 +59,11 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase
-
-    @Override
-    protected MissingQueryBuilder doCreateTestQueryBuilder() {
-        String fieldName = randomBoolean() ? randomFrom(MAPPED_FIELD_NAMES) : randomAsciiOfLengthBetween(1, 10);
-        Boolean existence = randomBoolean();
-        Boolean nullValue = randomBoolean();
-        if (existence == false && nullValue == false) {
-            if (randomBoolean()) {
-                existence = true;
-            } else {
-                nullValue = true;
-            }
-        }
-        return new MissingQueryBuilder(fieldName, nullValue, existence);
-    }
-
-    @Override
-    protected void doAssertLuceneQuery(MissingQueryBuilder queryBuilder, Query query, QueryShardContext context) throws IOException {
-        // too many mapping dependent cases to test, we don't want to end up
-        // duplication the toQuery method
-    }
-
-    public void testIllegalArguments() {
-        try {
-            if (randomBoolean()) {
-                new MissingQueryBuilder("", true, true);
-            } else {
-                new MissingQueryBuilder(null, true, true);
-            }
-            fail("must not be null or empty");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        try {
-            new MissingQueryBuilder("fieldname", false, false);
-            fail("existence and nullValue cannot both be false");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-
-        try {
-            new MissingQueryBuilder("fieldname", MissingQueryBuilder.DEFAULT_NULL_VALUE, false);
-            fail("existence and nullValue cannot both be false");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-    }
-
-    public void testBothNullValueAndExistenceFalse() throws IOException {
-        QueryShardContext context = createShardContext();
-        context.setAllowUnmappedFields(true);
-        try {
-            MissingQueryBuilder.newFilter(context, "field", false, false);
-            fail("Expected QueryShardException");
-        } catch (QueryShardException e) {
-            assertThat(e.getMessage(), containsString("missing must have either existence, or null_value"));
-        }
-    }
-
-    public void testFromJson() throws IOException {
-        String json =
-                "{\n" +
-                "  \"missing\" : {\n" +
-                "    \"field\" : \"user\",\n" +
-                "    \"null_value\" : false,\n" +
-                "    \"existence\" : true,\n" +
-                "    \"boost\" : 1.0\n" +
-                "  }\n" +
-                "}";
-
-        MissingQueryBuilder parsed = (MissingQueryBuilder) parseQuery(json);
-        checkGeneratedJson(json, parsed);
-
-        assertEquals(json, false, parsed.nullValue());
-        assertEquals(json, true, parsed.existence());
-        assertEquals(json, "user", parsed.fieldPattern());
-    }
-}
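With MissingQueryBuilder and its test class deleted, callers need another way to express the same check. The replacement is not shown in these hunks, so the following is an assumption: the usual idiom is to negate an exists query, which stays available in QueryBuilders.

---------------------------------------------------------------------------
// Assumed replacement for the removed missingQuery("user", ...):
// match documents where the "user" field is absent by negating exists.
QueryBuilder query = QueryBuilders.boolQuery()
        .mustNot(QueryBuilders.existsQuery("user"));
---------------------------------------------------------------------------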
diff --git a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java
index 7ba1d11815a..028987448f0 100644
--- a/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/QueryDSLDocumentationTests.java
@@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
 import static org.elasticsearch.index.query.QueryBuilders.indicesQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
-import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
 import static org.elasticsearch.index.query.QueryBuilders.moreLikeThisQuery;
 import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
 import static org.elasticsearch.index.query.QueryBuilders.nestedQuery;
@@ -240,10 +239,6 @@ public class QueryDSLDocumentationTests extends ESTestCase {
         matchQuery("name", "kimchy elasticsearch");
     }

-    public void testMissing() {
-        missingQuery("user", true, true);
-    }
-
     public void testMLT() {
         String[] fields = {"name.first", "name.last"};
         String[] texts = {"text like this one"};
diff --git a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
index bf580e55f46..4df799e9f37 100644
--- a/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java
@@ -23,7 +23,9 @@ import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermRangeQuery;
 import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.lucene.BytesRefs;
+import org.hamcrest.core.IsEqual;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;

@@ -353,4 +355,42 @@ public class RangeQueryBuilderTests extends AbstractQueryTestCase<RangeQueryBuilder>
 hashCodes = new HashSet<>();
         for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode
-            meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), randomAllocationId());
+            allocationId = randomBoolean() ? null : randomAllocationId();
+            meta = new ShardStateMetaData(randomLong(), randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId);
             hashCodes.add(meta.hashCode());
         }
         assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1);
diff --git a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
index 4858e0a6e3c..5a1aa2ef469 100644
--- a/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
+++ b/core/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java
@@ -19,26 +19,21 @@ package org.elasticsearch.index.store;

 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
 import org.apache.lucene.index.CheckIndex;
 import org.apache.lucene.index.IndexFileNames;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
-import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
-import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
-import org.elasticsearch.cluster.routing.ShardIterator;
-import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.cluster.routing.*;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
 import org.elasticsearch.common.Nullable;
@@ -48,7 +43,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.gateway.PrimaryShardAllocator;
 import org.elasticsearch.index.shard.*;
 import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest;
@@ -75,14 +69,7 @@ import java.nio.charset.StandardCharsets;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
+import java.util.*;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
@@ -91,16 +78,8 @@ import java.util.concurrent.atomic.AtomicBoolean;

 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
 import static org.elasticsearch.common.util.CollectionUtils.iterableAsArrayList;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
-import static org.hamcrest.Matchers.anyOf;
-import static org.hamcrest.Matchers.empty;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.greaterThanOrEqualTo;
-import static org.hamcrest.Matchers.is;
-import static org.hamcrest.Matchers.not;
-import static org.hamcrest.Matchers.notNullValue;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
+import static org.hamcrest.Matchers.*;

 @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE)
 public class CorruptedFileIT extends ESIntegTestCase {
@@ -111,9 +90,9 @@ public class CorruptedFileIT extends ESIntegTestCase {
             // and we need to make sure primaries are not just trashed if we don't have replicas
             .put(super.nodeSettings(nodeOrdinal))
             // speed up recoveries
-            .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, 10)
-            .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, 10)
-            .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, 5)
+            .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), 10)
+            .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), 10)
+            .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 5)
             .build();
     }
@@ -320,7 +299,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
         }
         assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));

-        Collections.shuffle(dataNodeStats, getRandom());
+        Collections.shuffle(dataNodeStats, random());
         NodeStats primariesNode = dataNodeStats.get(0);
         NodeStats unluckyNode = dataNodeStats.get(1);
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
@@ -380,7 +359,7 @@ public class CorruptedFileIT extends ESIntegTestCase {
         }
         assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));

-        Collections.shuffle(dataNodeStats, getRandom());
+        Collections.shuffle(dataNodeStats, random());
         NodeStats primariesNode = dataNodeStats.get(0);
         NodeStats unluckyNode = dataNodeStats.get(1);
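A recurring change in these test hunks: settings constants that used to be raw strings are now typed Setting instances, so call sites go through Setting#getKey() when building a Settings object. A minimal sketch of the updated call pattern, using a value from the nodeSettings() hunk above:

---------------------------------------------------------------------------
// Same pattern as the hunks above: reference the typed setting and use
// its key when assembling Settings.
Settings settings = Settings.builder()
        .put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), 10)
        .build();
---------------------------------------------------------------------------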
diff --git a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java
index aab980e975d..a29cc6cf8d0 100644
--- a/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java
+++ b/core/src/test/java/org/elasticsearch/index/translog/BufferedTranslogTests.java
@@ -34,13 +34,12 @@ import java.nio.file.Path;
 public class BufferedTranslogTests extends TranslogTests {

     @Override
-    protected Translog create(Path path) throws IOException {
+    protected TranslogConfig getTranslogConfig(Path path) {
         Settings build = Settings.settingsBuilder()
                 .put("index.translog.fs.type", TranslogWriter.Type.BUFFERED.name())
                 .put("index.translog.fs.buffer_size", 10 + randomInt(128 * 1024), ByteSizeUnit.BYTES)
                 .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
                 .build();
-        TranslogConfig translogConfig = new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
-        return new Translog(translogConfig);
+        return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
     }
 }
diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index 26faa02a17d..8b3294c15b8 100644
--- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -22,9 +22,12 @@ package org.elasticsearch.index.translog;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
 import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.mockfile.FilterFileChannel;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.MockDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LineFileDocs;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -110,16 +113,19 @@ public class TranslogTests extends ESTestCase {
         }
     }

-    protected Translog create(Path path) throws IOException {
+    private Translog create(Path path) throws IOException {
+        return new Translog(getTranslogConfig(path));
+    }
+
+    protected TranslogConfig getTranslogConfig(Path path) {
         Settings build = Settings.settingsBuilder()
                 .put(TranslogConfig.INDEX_TRANSLOG_FS_TYPE, TranslogWriter.Type.SIMPLE.name())
                 .put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT)
                 .build();
-        TranslogConfig translogConfig = new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
-        return new Translog(translogConfig);
+        return new TranslogConfig(shardId, path, IndexSettingsModule.newIndexSettings(shardId.index(), build), Translog.Durabilty.REQUEST, BigArrays.NON_RECYCLING_INSTANCE, null);
     }

-    protected void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) {
+    protected void addToTranslogAndList(Translog translog, ArrayList<Translog.Operation> list, Translog.Operation op) throws IOException {
         list.add(op);
         translog.add(op);
     }
@@ -330,7 +336,7 @@ public class TranslogTests extends ESTestCase {
         }
     }

-    public void testSnapshot() {
+    public void testSnapshot() throws IOException {
         ArrayList<Translog.Operation> ops = new ArrayList<>();
         Translog.Snapshot snapshot = translog.newSnapshot();
         assertThat(snapshot, SnapshotMatchers.size(0));
@@ -389,7 +395,7 @@ public class TranslogTests extends ESTestCase {
             Translog.Snapshot snapshot = translog.newSnapshot();
             fail("translog is closed");
         } catch (AlreadyClosedException ex) {
-            assertThat(ex.getMessage(), containsString("translog-1.tlog is already closed can't increment"));
+            assertEquals(ex.getMessage(), "translog is already closed");
         }
     }
@@ -634,7 +640,7 @@ public class TranslogTests extends ESTestCase {
             final String threadId = "writer_" + i;
             writers[i] = new Thread(new AbstractRunnable() {
                 @Override
-                public void doRun() throws BrokenBarrierException, InterruptedException {
+                public void doRun() throws BrokenBarrierException, InterruptedException, IOException {
                     barrier.await();
                     int counter = 0;
                     while (run.get()) {
@@ -1237,11 +1243,11 @@ public class TranslogTests extends ESTestCase {
         private final CountDownLatch downLatch;
         private final int opsPerThread;
         private final int threadId;
-        private final BlockingQueue<LocationOperation> writtenOperations;
+        private final Collection<LocationOperation> writtenOperations;
         private final Throwable[] threadExceptions;
         private final Translog translog;

-        public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, BlockingQueue<LocationOperation> writtenOperations, Throwable[] threadExceptions) {
+        public TranslogThread(Translog translog, CountDownLatch downLatch, int opsPerThread, int threadId, Collection<LocationOperation> writtenOperations, Throwable[] threadExceptions) {
             this.translog = translog;
             this.downLatch = downLatch;
             this.opsPerThread = opsPerThread;
@@ -1271,12 +1277,302 @@ public class TranslogTests extends ESTestCase {
                         throw new ElasticsearchException("not supported op type");
                 }

-                Translog.Location loc = translog.add(op);
+                Translog.Location loc = add(op);
                 writtenOperations.add(new LocationOperation(op, loc));
+                afterAdd();
             }
         } catch (Throwable t) {
             threadExceptions[threadId] = t;
         }
     }
+
+        protected Translog.Location add(Translog.Operation op) throws IOException {
+            return translog.add(op);
+        }
+
+        protected void afterAdd() throws IOException {}
     }

+    public void testFailFlush() throws IOException {
+        Path tempDir = createTempDir();
+        final AtomicBoolean fail = new AtomicBoolean();
+        TranslogConfig config = getTranslogConfig(tempDir);
+        Translog translog = getFailableTranslog(fail, config);
+
+        List<Translog.Location> locations = new ArrayList<>();
+        int opsSynced = 0;
+        boolean failed = false;
+        while (failed == false) {
+            try {
+                locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
+                translog.sync();
+                opsSynced++;
+            } catch (MockDirectoryWrapper.FakeIOException ex) {
+                failed = true;
+                assertFalse(translog.isOpen());
+            } catch (IOException ex) {
+                failed = true;
+                assertFalse(translog.isOpen());
+                assertEquals("__FAKE__ no space left on device", ex.getMessage());
+            }
+            fail.set(randomBoolean());
+        }
+        fail.set(false);
+        if (randomBoolean()) {
+            try {
+                locations.add(translog.add(new Translog.Index("test", "" + opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
+                fail("we are already closed");
+            } catch (AlreadyClosedException ex) {
+                assertNotNull(ex.getCause());
+                if (ex.getCause() instanceof MockDirectoryWrapper.FakeIOException) {
+                    assertNull(ex.getCause().getMessage());
+                } else {
+                    assertEquals(ex.getCause().getMessage(), "__FAKE__ no space left on device");
+                }
+            }
+
+        }
+        Translog.TranslogGeneration translogGeneration = translog.getGeneration();
+        try {
+            translog.newSnapshot();
+            fail("already closed");
+        } catch (AlreadyClosedException ex) {
+            // all is well
+            assertNotNull(ex.getCause());
+            assertSame(translog.getTragicException(), ex.getCause());
+        }
+
+        try {
+            translog.commit();
+            fail("already closed");
+        } catch (AlreadyClosedException ex) {
+            assertNotNull(ex.getCause());
+            assertSame(translog.getTragicException(), ex.getCause());
+        }
+
+        assertFalse(translog.isOpen());
+        translog.close(); // we are closed
+        config.setTranslogGeneration(translogGeneration);
+        try (Translog tlog = new Translog(config)) {
+            assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
+            assertFalse(tlog.syncNeeded());
+
+            try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
+                assertEquals(opsSynced, snapshot.estimatedTotalOperations());
+                for (int i = 0; i < opsSynced; i++) {
+                    assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, locations.get(i).generation);
+                    Translog.Operation next = snapshot.next();
assertNotNull("operation " + i + " must be non-null", next); + assertEquals(i, Integer.parseInt(next.getSource().source.toUtf8())); + } + } + } + } + + public void testTranslogOpsCountIsCorrect() throws IOException { + List locations = new ArrayList<>(); + int numOps = randomIntBetween(100, 200); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly + for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { + locations.add(translog.add(new Translog.Index("test", "" + opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + try (Translog.Snapshot snapshot = translog.newSnapshot()) { + assertEquals(opsAdded+1, snapshot.estimatedTotalOperations()); + for (int i = 0; i < opsAdded; i++) { + assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), locations.get(i).generation); + Translog.Operation next = snapshot.next(); + assertNotNull("operation " + i + " must be non-null", next); + } + } + } + } + + public void testTragicEventCanBeAnyException() throws IOException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(); + TranslogConfig config = getTranslogConfig(tempDir); + assumeFalse("this won't work if we sync on any op",config.isSyncOnEachOperation()); + Translog translog = getFailableTranslog(fail, config, false, true); + LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly + translog.add(new Translog.Index("test", "1", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + fail.set(true); + try { + Translog.Location location = translog.add(new Translog.Index("test", "2", lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + if (config.getType() == TranslogWriter.Type.BUFFERED) { // the buffered case will fail on the add if we exceed the buffer or will fail on the flush once we sync + if (randomBoolean()) { + translog.ensureSynced(location); + } else { + translog.sync(); + } + } + //TODO once we have a mock FS that can simulate we can also fail on plain sync + fail("WTF"); + } catch (UnknownException ex) { + // w00t + } catch (TranslogException ex) { + assertTrue(ex.getCause() instanceof UnknownException); + } + assertFalse(translog.isOpen()); + assertTrue(translog.getTragicException() instanceof UnknownException); + } + + public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, InterruptedException { + Path tempDir = createTempDir(); + final AtomicBoolean fail = new AtomicBoolean(false); + + TranslogConfig config = getTranslogConfig(tempDir); + Translog translog = getFailableTranslog(fail, config); + + final int threadCount = randomIntBetween(1, 5); + Thread[] threads = new Thread[threadCount]; + final Throwable[] threadExceptions = new Throwable[threadCount]; + final CountDownLatch downLatch = new CountDownLatch(1); + final CountDownLatch added = new CountDownLatch(randomIntBetween(10, 100)); + List writtenOperations = Collections.synchronizedList(new ArrayList<>()); + for (int i = 0; i < threadCount; i++) { + final int threadId = i; + threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, threadExceptions) { + @Override + protected Translog.Location add(Translog.Operation op) throws IOException { + Translog.Location add = super.add(op); + added.countDown(); + return add; + } + + @Override + protected void afterAdd() throws IOException { + if 
+            };
+            threads[i].setDaemon(true);
+            threads[i].start();
+        }
+        downLatch.countDown();
+        added.await();
+        try (Translog.View view = translog.newView()) {
+            // this holds a reference to the current tlog channel such that it's not closed
+            // if we hit a tragic event. this is important to ensure that asserts inside the Translog#add doesn't trip
+            // otherwise our assertions here are off by one sometimes.
+            fail.set(true);
+            for (int i = 0; i < threadCount; i++) {
+                threads[i].join();
+            }
+            boolean atLeastOneFailed = false;
+            for (Throwable ex : threadExceptions) {
+                if (ex != null) {
+                    assertTrue(ex.toString(), ex instanceof IOException || ex instanceof AlreadyClosedException);
+                    atLeastOneFailed = true;
+                }
+            }
+            if (atLeastOneFailed == false) {
+                try {
+                    boolean syncNeeded = translog.syncNeeded();
+                    translog.close();
+                    assertFalse("should have failed if sync was needed", syncNeeded);
+                } catch (IOException ex) {
+                    // boom now we failed
+                }
+            }
+            Collections.sort(writtenOperations, (a, b) -> a.location.compareTo(b.location));
+            assertFalse(translog.isOpen());
+            final Checkpoint checkpoint = Checkpoint.read(config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME));
+            Iterator<LocationOperation> iterator = writtenOperations.iterator();
+            while (iterator.hasNext()) {
+                LocationOperation next = iterator.next();
+                if (checkpoint.offset < (next.location.translogLocation + next.location.size)) {
+                    // drop all that haven't been synced
+                    iterator.remove();
+                }
+            }
+            config.setTranslogGeneration(translog.getGeneration());
+            try (Translog tlog = new Translog(config)) {
+                try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
+                    if (writtenOperations.size() != snapshot.estimatedTotalOperations()) {
+                        for (int i = 0; i < threadCount; i++) {
+                            if (threadExceptions[i] != null)
+                                threadExceptions[i].printStackTrace();
+                        }
+                    }
+                    assertEquals(writtenOperations.size(), snapshot.estimatedTotalOperations());
+                    for (int i = 0; i < writtenOperations.size(); i++) {
+                        assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation);
+                        Translog.Operation next = snapshot.next();
+                        assertNotNull("operation " + i + " must be non-null", next);
+                        assertEquals(next, writtenOperations.get(i).operation);
+                    }
+                }
+            }
+        }
+    }
+
+    private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config) throws IOException {
+        return getFailableTranslog(fail, config, randomBoolean(), false);
+    }
+
+    private Translog getFailableTranslog(final AtomicBoolean fail, final TranslogConfig config, final boolean partialWrites, final boolean throwUnknownException) throws IOException {
+        return new Translog(config) {
+            @Override
+            TranslogWriter.ChannelFactory getChannelFactory() {
+                final TranslogWriter.ChannelFactory factory = super.getChannelFactory();
+
+                return new TranslogWriter.ChannelFactory() {
+                    @Override
+                    public FileChannel open(Path file) throws IOException {
+                        FileChannel channel = factory.open(file);
+                        return new ThrowingFileChannel(fail, partialWrites, throwUnknownException, channel);
+                    }
+                };
+            }
+        };
+    }
+
+    public static class ThrowingFileChannel extends FilterFileChannel {
+        private final AtomicBoolean fail;
+        private final boolean partialWrite;
+        private final boolean throwUnknownException;
+
+        public ThrowingFileChannel(AtomicBoolean fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) {
+            super(delegate);
+            this.fail = fail;
+            this.partialWrite = partialWrite;
+            this.throwUnknownException = throwUnknownException;
+        }
+
+        @Override
+        public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public int write(ByteBuffer src, long position) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        public int write(ByteBuffer src) throws IOException {
+            if (fail.get()) {
+                if (partialWrite) {
+                    if (src.hasRemaining()) {
+                        final int pos = src.position();
+                        final int limit = src.limit();
+                        src.limit(randomIntBetween(pos, limit));
+                        super.write(src);
+                        src.limit(limit);
+                        src.position(pos);
+                        throw new IOException("__FAKE__ no space left on device");
+                    }
+                }
+                if (throwUnknownException) {
+                    throw new UnknownException();
+                } else {
+                    throw new MockDirectoryWrapper.FakeIOException();
+                }
+            }
+            return super.write(src);
+        }
+    }
+
+    private static final class UnknownException extends RuntimeException {
+    }
 }
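The new translog tests above pin down the "tragic event" contract these hunks introduce: once a write or sync fails, the translog closes itself, and later operations throw AlreadyClosedException whose cause is the original failure, exposed via getTragicException(). A condensed sketch of that contract as testFailFlush exercises it:

---------------------------------------------------------------------------
// Condensed from testFailFlush above: after an injected I/O failure,
// the translog is closed and the failure is preserved as the cause.
try {
    translog.newSnapshot();
    fail("translog should be closed after a tragic event");
} catch (AlreadyClosedException ex) {
    assertSame(translog.getTragicException(), ex.getCause());
}
---------------------------------------------------------------------------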
diff --git a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
index 8de3af25827..4f6aaf25705 100644
--- a/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/IndicesLifecycleListenerIT.java
@@ -170,14 +170,14 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
         //add a node: 3 out of the 6 shards will be relocated to it
         //disable allocation before starting a new node, as we need to register the listener first
         assertAcked(client().admin().cluster().prepareUpdateSettings()
-                .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none")));
+                .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")));
         String node2 = internalCluster().startNode();
         IndexShardStateChangeListener stateChangeListenerNode2 = new IndexShardStateChangeListener();
         //add a listener that keeps track of the shard state changes
         internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node2).setNewDelegate(stateChangeListenerNode2);
         //re-enable allocation
         assertAcked(client().admin().cluster().prepareUpdateSettings()
-                .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all")));
+                .setPersistentSettings(builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all")));
         ensureGreen();

         //the 3 relocated shards get closed on the first node
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
index aa8c9f18c01..9ff0df4d390 100644
--- a/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/flush/FlushIT.java
@@ -22,6 +22,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.admin.indices.stats.IndexStats;
 import org.elasticsearch.action.admin.indices.stats.ShardStats;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -99,7 +100,7 @@ public class FlushIT extends ESIntegTestCase {
             result = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), new ShardId("test", 0));
         } else {
             logger.info("--> sync flushing index [test]");
-            IndicesSyncedFlushResult indicesResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test");
+            SyncedFlushResponse indicesResult = client().admin().indices().prepareSyncedFlush("test").get();
             result = indicesResult.getShardsResultPerIndex().get("test").get(0);
         }
         assertFalse(result.failed());
@@ -171,7 +172,7 @@ public class FlushIT extends ESIntegTestCase {
             assertNull(shardStats.getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
         }
         logger.info("--> trying sync flush");
-        IndicesSyncedFlushResult syncedFlushResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test");
+        SyncedFlushResponse syncedFlushResult = client().admin().indices().prepareSyncedFlush("test").get();
         logger.info("--> sync flush done");
         stop.set(true);
         indexingThread.join();
@@ -191,7 +192,7 @@ public class FlushIT extends ESIntegTestCase {
         for (final ShardStats shardStats : shardsStats) {
             for (final ShardsSyncedFlushResult shardResult : syncedFlushResults) {
                 if (shardStats.getShardRouting().getId() == shardResult.shardId().getId()) {
-                    for (Map.Entry<ShardRouting, SyncedFlushService.SyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) {
+                    for (Map.Entry<ShardRouting, SyncedFlushService.ShardSyncedFlushResponse> singleResponse : shardResult.shardResponses().entrySet()) {
                         if (singleResponse.getKey().currentNodeId().equals(shardStats.getShardRouting().currentNodeId())) {
                             if (singleResponse.getValue().success()) {
                                 logger.info("{} sync flushed on node {}", singleResponse.getKey().shardId(), singleResponse.getKey().currentNodeId());
@@ -212,7 +213,7 @@ public class FlushIT extends ESIntegTestCase {
         prepareCreate("test").setSettings(Settings.builder().put("index.routing.allocation.include._name", "nonexistent")).get();

         // this should not hang but instead immediately return with empty result set
-        List<ShardsSyncedFlushResult> shardsResult = SyncedFlushUtil.attemptSyncedFlush(internalCluster(), "test").getShardsResultPerIndex().get("test");
+        List<ShardsSyncedFlushResult> shardsResult = client().admin().indices().prepareSyncedFlush("test").get().getShardsResultPerIndex().get("test");
         // just to make sure the test actually tests the right thing
         int numShards = client().admin().indices().prepareGetSettings("test").get().getIndexToSettings().get("test").getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, -1);
         assertThat(shardsResult.size(), equalTo(numShards));
diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
index 1a4bf8fd3f7..e4c9cb8a7ef 100644
--- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
+++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java
@@ -98,7 +98,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
         assertNotNull(syncedFlushResult);
         assertEquals(1, syncedFlushResult.successfulShards());
         assertEquals(1, syncedFlushResult.totalShards());
-        SyncedFlushService.SyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next();
+        SyncedFlushService.ShardSyncedFlushResponse response = syncedFlushResult.shardResponses().values().iterator().next();
         assertTrue(response.success());
     }
@@ -157,7 +157,7 @@ public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase {
         assertNull(listener.result);
         assertEquals("no such index", listener.error.getMessage());
     }
-    
+
     public void testFailAfterIntermediateCommit() throws InterruptedException {
createIndex("test"); client().prepareIndex("test", "test", "1").setSource("{}").get(); diff --git a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index fef6c23231e..fdabaf6b5a8 100644 --- a/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/core/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.flush; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,6 +32,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import static org.elasticsearch.test.ESIntegTestCase.client; +import static org.elasticsearch.test.ESTestCase.randomBoolean; + /** Utils for SyncedFlush */ public class SyncedFlushUtil { @@ -38,25 +42,6 @@ public class SyncedFlushUtil { } - /** - * Blocking single index version of {@link SyncedFlushService#attemptSyncedFlush(String[], IndicesOptions, ActionListener)} - */ - public static IndicesSyncedFlushResult attemptSyncedFlush(InternalTestCluster cluster, String index) { - SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - LatchedListener listener = new LatchedListener(); - service.attemptSyncedFlush(new String[]{index}, IndicesOptions.lenientExpandOpen(), listener); - try { - listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; - } - - /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ @@ -109,5 +94,4 @@ public class SyncedFlushUtil { } return listener.result; } - } diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java index 3398839b905..b1b56acd8cd 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java @@ -41,12 +41,12 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop") // This is set low, because if the "noop" is not a noop, it will break - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); } diff --git 
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
index 3398839b905..b1b56acd8cd 100644
--- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerNoopIT.java
@@ -41,12 +41,12 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return Settings.builder()
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop")
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
                 // This is set low, because if the "noop" is not a noop, it will break
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
-                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop")
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
+                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
                 // This is set low, because if the "noop" is not a noop, it will break
-                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b")
+                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
                 .build();
     }
diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
index fcd94d99585..1af04e295dd 100644
--- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
+++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceIT.java
@@ -63,13 +63,13 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {
     private void reset() {
         logger.info("--> resetting breaker settings");
         Settings resetSettings = settingsBuilder()
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING,
-                        HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_BREAKER_LIMIT)
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING,
-                        HierarchyCircuitBreakerService.DEFAULT_FIELDDATA_OVERHEAD_CONSTANT)
-                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING,
-                        HierarchyCircuitBreakerService.DEFAULT_REQUEST_BREAKER_LIMIT)
-                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0)
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
+                        HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null))
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(),
+                        HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getDefault(null))
+                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(),
+                        HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getDefault(null))
+                .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0)
                 .build();
         assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
     }
@@ -119,8 +119,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {

         // Update circuit breaker settings
         Settings settings = settingsBuilder()
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b")
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05)
                 .build();
         assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
@@ -168,8 +168,8 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase {

         // Update circuit breaker settings
         Settings settings = settingsBuilder()
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "100b")
-                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.05)
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
+                .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.05)
                 .build();
         assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); @@ -234,9 +234,9 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Adjust settings so the parent breaker will fail, but the fielddata breaker doesn't resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING, "15b") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING, "90%") - .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING, 1.0) + .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "15b") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "90%") + .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0) .build(); client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet(); @@ -261,7 +261,7 @@ public class CircuitBreakerServiceIT extends ESIntegTestCase { // Make request breaker limited to a small amount Settings resetSettings = settingsBuilder() - .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, "10b") + .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b") .build(); assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings)); diff --git a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java index 741ea305254..212d7ecbb7b 100644 --- a/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java +++ b/core/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerUnitTests.java @@ -19,12 +19,12 @@ package org.elasticsearch.indices.memory.breaker; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.BreakerSettings; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import static org.hamcrest.Matchers.equalTo; @@ -66,7 +66,7 @@ public class CircuitBreakerUnitTests extends ESTestCase { } public void testRegisterCustomBreaker() throws Exception { - CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new NodeSettingsService(Settings.EMPTY)); + CircuitBreakerService service = new HierarchyCircuitBreakerService(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); String customName = "custom"; BreakerSettings settings = new BreakerSettings(customName, 20, 1.0); service.registerBreaker(settings); diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index 4cf60289a18..2b8f5ea6388 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ 
-145,7 +145,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() // one chunk per sec.. - .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, chunkSize, ByteSizeUnit.BYTES) + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), chunkSize, ByteSizeUnit.BYTES) ) .get().isAcknowledged()); } @@ -156,7 +156,7 @@ public class IndexRecoveryIT extends ESIntegTestCase { } assertTrue(client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, "20mb") + .put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), "20mb") ) .get().isAcknowledged()); } @@ -529,8 +529,8 @@ public class IndexRecoveryIT extends ESIntegTestCase { public void testDisconnectsWhileRecovering() throws Exception { final String indexName = "test"; final Settings nodeSettings = Settings.builder() - .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, "100ms") - .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") + .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms") + .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s") .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 8346003287c..75a2b14060b 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.settings.Settings; @@ -44,7 +45,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.store.StoreFileMetaData; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.CorruptionUtils; import org.elasticsearch.test.DummyShardLock; import org.elasticsearch.test.ESTestCase; @@ -60,7 +60,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class RecoverySourceHandlerTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings(new Index("index"), Settings.settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build()); private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); - private final NodeSettingsService service = new NodeSettingsService(Settings.EMPTY); + private final ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); public void testSendFiles() throws Throwable { Settings settings = Settings.builder().put("indices.recovery.concurrent_streams", 1). 
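Editorial note on the hunks above: the tests replace the removed NodeSettingsService with ClusterSettings wherever a breaker or recovery service is constructed by hand. A minimal sketch of the new wiring, assuming only the two constructor calls that already appear in this diff; the surrounding class is illustrative scaffolding, not code from the patch:

```java
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;

class ClusterSettingsWiringSketch {
    static HierarchyCircuitBreakerService newBreakerService() {
        // ClusterSettings is seeded with the registry of built-in cluster settings,
        // so dynamic updates are validated against known Setting definitions
        // instead of being forwarded as opaque strings.
        ClusterSettings clusterSettings =
                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
        return new HierarchyCircuitBreakerService(Settings.EMPTY, clusterSettings);
    }
}
```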
diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java index fea8f0fdf14..8b23354ebb8 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTests.java @@ -26,13 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.transport.DummyTransportAddress; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.indices.recovery.RecoveryState.File; -import org.elasticsearch.indices.recovery.RecoveryState.Index; -import org.elasticsearch.indices.recovery.RecoveryState.Stage; -import org.elasticsearch.indices.recovery.RecoveryState.Timer; -import org.elasticsearch.indices.recovery.RecoveryState.Translog; -import org.elasticsearch.indices.recovery.RecoveryState.Type; -import org.elasticsearch.indices.recovery.RecoveryState.VerifyIndex; +import org.elasticsearch.indices.recovery.RecoveryState.*; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -43,14 +37,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.VersionUtils.randomVersion; -import static org.hamcrest.Matchers.arrayContainingInAnyOrder; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.either; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.*; public class RecoveryStateTests extends ESTestCase { abstract class Streamer extends Thread { @@ -201,7 +188,7 @@ public class RecoveryStateTests extends ESTestCase { } } - Collections.shuffle(Arrays.asList(files)); + Collections.shuffle(Arrays.asList(files), random()); final RecoveryState.Index index = new RecoveryState.Index(); if (randomBoolean()) { diff --git a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index bde40aa928f..8ec629dbbdc 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -40,7 +40,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. 
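Editorial note: the pattern repeated throughout these hunks is that cluster settings are no longer addressed by bare String constants but by typed Setting objects, so call sites ask the setting for its key (getKey()) and its default (getDefault(null)) instead of hard-coding either. A stand-in sketch of that shape; this is not the actual org.elasticsearch.common.settings.Setting API, only the contract the updated call sites rely on:

```java
// Stand-in for the typed-setting idea; the name, constructor, and the
// simplistic default handling are assumptions, not the real implementation.
final class TypedSetting<T> {
    private final String key;
    private final T defaultValue;

    TypedSetting(String key, T defaultValue) {
        this.key = key;
        this.defaultValue = defaultValue;
    }

    // Settings builders still store raw string keys, so callers write
    // builder.put(SETTING.getKey(), value) rather than builder.put("some.key", value).
    String getKey() {
        return key;
    }

    // Replaces companion constants such as DEFAULT_FIELDDATA_BREAKER_LIMIT:
    // the setting definition, not each caller, owns the default value.
    T getDefault(Object currentSettings) {
        return defaultValue;
    }
}
```

The payoff is visible in CircuitBreakerServiceIT's reset() above: defaults are read back from the same definition that owns the key, so the two can never drift apart.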
public void testCloseAllRequiresName() { Settings clusterSettings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(clusterSettings)); createIndex("test1", "test2", "test3"); @@ -91,7 +91,7 @@ public class CloseIndexDisableCloseAllIT extends ESIntegTestCase { createIndex("test_no_close"); healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.SETTING_CLUSTER_INDICES_CLOSE_ENABLE, false)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(TransportCloseIndexAction.CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey(), false)).get(); try { client.admin().indices().prepareClose("test_no_close").execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java index 96611aeca8a..2e73a466677 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/RareClusterStateIT.java @@ -171,7 +171,7 @@ public class RareClusterStateIT extends ESIntegTestCase { ensureGreen("test"); // now that the cluster is stable, remove publishing timeout - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0"))); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0"))); Set nodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); nodes.remove(internalCluster().getMasterName()); @@ -200,7 +200,7 @@ public class RareClusterStateIT extends ESIntegTestCase { // but the change might not be on the node that performed the indexing // operation yet - Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "0ms").build(); + Settings settings = Settings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "0ms").build(); final List nodeNames = internalCluster().startNodesAsync(2, settings).get(); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); diff --git a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java index a5cfa816455..af9cfeb94c1 100644 --- a/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateIT.java @@ -97,7 +97,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase { client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); } - public void testFastCloseAfterCreateDoesNotClose() { + public void testFastCloseAfterCreateContinuesCreateAfterOpen() { logger.info("--> creating test index that cannot be allocated"); client().admin().indices().prepareCreate("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "no_such_node").build()).get(); @@ -106,17 +106,14 @@ public class SimpleIndexStateIT extends ESIntegTestCase { 
assertThat(health.isTimedOut(), equalTo(false)); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED)); - try { - client().admin().indices().prepareClose("test").get(); - fail("Exception should have been thrown"); - } catch(IndexPrimaryShardNotAllocatedException e) { - // expected - } + client().admin().indices().prepareClose("test").get(); logger.info("--> updating test index settings to allow allocation"); client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.settingsBuilder() .put("index.routing.allocation.include.tag", "").build()).get(); + client().admin().indices().prepareOpen("test").get(); + logger.info("--> waiting for green status"); ensureGreen(); diff --git a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java index a87da6fc046..4bdd972ea9c 100644 --- a/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java +++ b/core/src/test/java/org/elasticsearch/indices/stats/IndexStatsIT.java @@ -23,8 +23,8 @@ import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; -import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -46,11 +46,12 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.shard.MergePolicyConfig; import org.elasticsearch.index.shard.MergeSchedulerConfig; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.indices.cache.request.IndicesRequestCache; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; +import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; import java.util.EnumSet; @@ -315,7 +316,7 @@ public class IndexStatsIT extends ESIntegTestCase { .put(MergeSchedulerConfig.MAX_THREAD_COUNT, "1") .put(MergeSchedulerConfig.MAX_MERGE_COUNT, "1") .put("index.merge.policy.type", "tiered") - + .put(TranslogConfig.INDEX_TRANSLOG_DURABILITY, "ASYNC") )); ensureGreen(); long termUpto = 0; diff --git a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java index fc4dd4f6487..948b76b963d 100644 --- a/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationIT.java @@ -303,7 +303,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation to control the situation more easily assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> 
shutting down two random nodes"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1, node2, node3)); @@ -322,7 +322,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { .put(FilterAllocationDecider.INDEX_ROUTING_EXCLUDE_GROUP + "_name", "NONE"))); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for shards to recover on [{}]", node4); // we have to do this in two steps as we now do async shard fetching before assigning, so the change to the @@ -340,7 +340,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable allocation again to control concurrency a bit and allow shard active to kick in before allocation assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.debug("--> starting the two old nodes back"); @@ -351,7 +351,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "all"))); + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "all"))); logger.debug("--> waiting for the lost shard to be recovered"); @@ -396,7 +396,7 @@ public class IndicesStoreIntegrationIT extends ESIntegTestCase { // disable relocations when we do this, to make sure the shards are not relocated from node2 // due to rebalancing, and delete its content - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE)).get(); + client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder().put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)).get(); internalCluster().getInstance(ClusterService.class, nonMasterNode).submitStateUpdateTask("test", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index ca2025ced1b..b32cfef76b6 100644 --- a/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ b/core/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java @@ -560,7 +560,7 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase { .setOrder(0) .addAlias(new Alias("alias1")) .addAlias(new Alias("{index}-alias")) - .addAlias(new Alias("alias3").filter(QueryBuilders.missingQuery("test"))) + .addAlias(new Alias("alias3").filter(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("test")))) .addAlias(new Alias("alias4")).get(); client().admin().indices().preparePutTemplate("template2") diff --git a/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java new 
file mode 100644 index 00000000000..693ba4a2eba --- /dev/null +++ b/core/src/test/java/org/elasticsearch/nodesinfo/NodeInfoStreamingTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nodesinfo; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; +import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.BoundTransportAddress; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.http.HttpInfo; +import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.monitor.os.DummyOsInfo; +import org.elasticsearch.monitor.os.OsInfo; +import org.elasticsearch.monitor.process.ProcessInfo; +import org.elasticsearch.plugins.DummyPluginInfo; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolInfo; +import org.elasticsearch.transport.TransportInfo; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.core.IsEqual.equalTo; + +/** + * + */ +public class NodeInfoStreamingTests extends ESTestCase { + + public void testNodeInfoStreaming() throws IOException { + NodeInfo nodeInfo = createNodeInfo(); + Version version = Version.CURRENT; + BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + nodeInfo.writeTo(out); + out.close(); + StreamInput in = StreamInput.wrap(out.bytes()); + in.setVersion(version); + NodeInfo readNodeInfo = NodeInfo.readNodeInfo(in); + assertExpectedUnchanged(nodeInfo, readNodeInfo); + + } + // checks all properties that are expected to be unchanged. 
Once we start changing them between versions this method has to be changed as well + private void assertExpectedUnchanged(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + assertThat(nodeInfo.getBuild().toString(), equalTo(readNodeInfo.getBuild().toString())); + assertThat(nodeInfo.getHostname(), equalTo(readNodeInfo.getHostname())); + assertThat(nodeInfo.getVersion(), equalTo(readNodeInfo.getVersion())); + assertThat(nodeInfo.getServiceAttributes().size(), equalTo(readNodeInfo.getServiceAttributes().size())); + for (Map.Entry<String, String> entry : nodeInfo.getServiceAttributes().entrySet()) { + assertNotNull(readNodeInfo.getServiceAttributes().get(entry.getKey())); + assertThat(readNodeInfo.getServiceAttributes().get(entry.getKey()), equalTo(entry.getValue())); + } + compareJsonOutput(nodeInfo.getHttp(), readNodeInfo.getHttp()); + compareJsonOutput(nodeInfo.getJvm(), readNodeInfo.getJvm()); + compareJsonOutput(nodeInfo.getProcess(), readNodeInfo.getProcess()); + compareJsonOutput(nodeInfo.getSettings(), readNodeInfo.getSettings()); + compareJsonOutput(nodeInfo.getThreadPool(), readNodeInfo.getThreadPool()); + compareJsonOutput(nodeInfo.getTransport(), readNodeInfo.getTransport()); + compareJsonOutput(nodeInfo.getNode(), readNodeInfo.getNode()); + compareJsonOutput(nodeInfo.getOs(), readNodeInfo.getOs()); + comparePluginsAndModules(nodeInfo, readNodeInfo); + } + + private void comparePluginsAndModules(NodeInfo nodeInfo, NodeInfo readNodeInfo) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder pluginsAndModules = jsonBuilder(); + pluginsAndModules.startObject(); + nodeInfo.getPlugins().toXContent(pluginsAndModules, params); + pluginsAndModules.endObject(); + XContentBuilder readPluginsAndModules = jsonBuilder(); + readPluginsAndModules.startObject(); + readNodeInfo.getPlugins().toXContent(readPluginsAndModules, params); + readPluginsAndModules.endObject(); + assertThat(pluginsAndModules.string(), equalTo(readPluginsAndModules.string())); + } + + private void compareJsonOutput(ToXContent param1, ToXContent param2) throws IOException { + ToXContent.Params params = ToXContent.EMPTY_PARAMS; + XContentBuilder param1Builder = jsonBuilder(); + XContentBuilder param2Builder = jsonBuilder(); + param1.toXContent(param1Builder, params); + param2.toXContent(param2Builder, params); + assertThat(param1Builder.string(), equalTo(param2Builder.string())); + } + + + private NodeInfo createNodeInfo() { + Build build = Build.CURRENT; + DiscoveryNode node = new DiscoveryNode("test_node", DummyTransportAddress.INSTANCE, VersionUtils.randomVersion(random())); + Map<String, String> serviceAttributes = new HashMap<>(); + serviceAttributes.put("test", "attribute"); + Settings settings = Settings.builder().put("test", "setting").build(); + OsInfo osInfo = DummyOsInfo.INSTANCE; + ProcessInfo process = new ProcessInfo(randomInt(), randomBoolean()); + JvmInfo jvm = JvmInfo.jvmInfo(); + List<ThreadPool.Info> threadPoolInfos = new ArrayList<>(); + threadPoolInfos.add(new ThreadPool.Info("test_threadpool", ThreadPool.ThreadPoolType.FIXED, 5)); + ThreadPoolInfo threadPoolInfo = new ThreadPoolInfo(threadPoolInfos); + Map<String, BoundTransportAddress> profileAddresses = new HashMap<>(); + BoundTransportAddress dummyBoundTransportAddress = new BoundTransportAddress(new TransportAddress[]{DummyTransportAddress.INSTANCE}, DummyTransportAddress.INSTANCE); + profileAddresses.put("test_address", dummyBoundTransportAddress); + TransportInfo transport = new TransportInfo(dummyBoundTransportAddress, profileAddresses); + HttpInfo httpInfo = new HttpInfo(dummyBoundTransportAddress, randomLong()); + PluginsAndModules plugins = new PluginsAndModules(); + plugins.addModule(DummyPluginInfo.INSTANCE); + plugins.addPlugin(DummyPluginInfo.INSTANCE); + return new NodeInfo(VersionUtils.randomVersion(random()), build, node, serviceAttributes, settings, osInfo, process, jvm, threadPoolInfo, transport, httpInfo, plugins); + } +}
diff --git a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java index 49d22b87bf8..514b1757e41 100644 --- a/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationIT.java @@ -34,7 +34,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // The cluster scope is test b/c we can't clear cluster settings. public void testDestructiveOperations() throws Exception { Settings settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -58,7 +58,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -68,7 +68,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { // end delete index: // close index: settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), true) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); @@ -100,7 +100,7 @@ public class DestructiveOperationsIntegrationIT extends ESIntegTestCase { } settings = Settings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) + .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .build(); assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); assertAcked(client().admin().indices().prepareClose("_all").get());
diff --git a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java b/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java deleted file mode 100644 index 7831b7ca994..00000000000 --- a/core/src/test/java/org/elasticsearch/plugins/PluggableTransportModuleIT.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.plugins; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.transport.AssertingLocalTransport; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.Transport; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestOptions; - -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicInteger; - -import static org.elasticsearch.common.settings.Settings.settingsBuilder; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.is; - -/** - * - */ -@ClusterScope(scope = Scope.SUITE, numDataNodes = 2) -public class PluggableTransportModuleIT extends ESIntegTestCase { - public static final AtomicInteger SENT_REQUEST_COUNTER = new AtomicInteger(0); - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return settingsBuilder() - .put(super.nodeSettings(nodeOrdinal)) - .put(DiscoveryModule.DISCOVERY_TYPE_KEY, "local") - .build(); - } - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return pluginList(CountingSentRequestsPlugin.class); - } - - public void testThatPluginFunctionalityIsLoadedWithoutConfiguration() throws Exception { - for (Transport transport : internalCluster().getInstances(Transport.class)) { - assertThat(transport, instanceOf(CountingAssertingLocalTransport.class)); - } - - int countBeforeRequest = SENT_REQUEST_COUNTER.get(); - internalCluster().clientNodeClient().admin().cluster().prepareHealth().get(); - int countAfterRequest = SENT_REQUEST_COUNTER.get(); - assertThat("Expected send request counter to be greather than zero", countAfterRequest, is(greaterThan(countBeforeRequest))); - } - - public static class CountingSentRequestsPlugin extends Plugin { - @Override - public String name() { - return "counting-pipelines-plugin"; - } - - @Override - public String description() { - return "counting-pipelines-plugin"; - } - - public void onModule(TransportModule transportModule) { - transportModule.setTransport(CountingAssertingLocalTransport.class, this.name()); - } - } - - public static final class CountingAssertingLocalTransport extends AssertingLocalTransport { - - @Inject - public CountingAssertingLocalTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { - super(settings, threadPool, version, namedWriteableRegistry); - } - - @Override - public void sendRequest(final DiscoveryNode node, final long requestId, final String action, final TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { - SENT_REQUEST_COUNTER.incrementAndGet(); -
super.sendRequest(node, requestId, action, request, options); - } - } -} diff --git a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java index b9282cf05ad..a16f318140f 100644 --- a/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java +++ b/core/src/test/java/org/elasticsearch/plugins/responseheader/TestResponseHeaderPlugin.java @@ -19,8 +19,8 @@ package org.elasticsearch.plugins.responseheader; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestModule; public class TestResponseHeaderPlugin extends Plugin { @@ -34,7 +34,7 @@ public class TestResponseHeaderPlugin extends Plugin { return "test-plugin-custom-header-desc"; } - public void onModule(RestModule restModule) { - restModule.addRestAction(TestResponseHeaderRestAction.class); + public void onModule(NetworkModule module) { + module.registerRestHandler(TestResponseHeaderRestAction.class); } } diff --git a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java index 4bcbb8c8ee7..d7e13be312f 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java +++ b/core/src/test/java/org/elasticsearch/recovery/RecoverySettingsTests.java @@ -32,49 +32,49 @@ public class RecoverySettingsTests extends ESSingleNodeTestCase { } public void testAllSettingsAreDynamicallyUpdatable() { - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, randomIntBetween(1, 200), new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), randomIntBetween(1, 200), new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.concurrentSmallFileStreamPool().getMaximumPoolSize()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, 0, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), 0, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(null, recoverySettings.rateLimiter()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.retryDelayStateSync().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + 
innerTestSettings(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.retryDelayNetwork().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.activityTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionTimeout().millis()); } }); - innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT, randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { + innerTestSettings(RecoverySettings.INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 200), TimeUnit.MILLISECONDS, new Validator() { @Override public void validate(RecoverySettings recoverySettings, int expectedValue) { assertEquals(expectedValue, recoverySettings.internalActionLongTimeout().millis()); diff --git a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java index 57b5e888ea9..541911ce4e0 100644 --- a/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/RelocationIT.java @@ -387,7 +387,7 @@ public class RelocationIT extends ESIntegTestCase { logger.info("--> stopping replica assignment"); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, "none"))); + .setTransientSettings(Settings.builder().put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none"))); logger.info("--> wait for all replica shards to be removed, on all nodes"); assertBusy(new Runnable() { diff --git a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java index 4d111469505..60a14abac7c 100644 --- a/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java +++ b/core/src/test/java/org/elasticsearch/recovery/TruncatedRecoveryIT.java @@ -29,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.discovery.Discovery; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.recovery.IndexRecoveryIT; import org.elasticsearch.indices.recovery.RecoveryFileChunkRequest; @@ -84,7 +83,7 @@ public class TruncatedRecoveryIT extends ESIntegTestCase { } } assertThat(dataNodeStats.size(), 
greaterThanOrEqualTo(2)); - Collections.shuffle(dataNodeStats, getRandom()); + Collections.shuffle(dataNodeStats, random()); // we use 2 nodes a lucky and unlucky one // the lucky one holds the primary // the unlucky one gets the replica and the truncated leftovers diff --git a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java index fc888c79a8c..987aef90bc3 100644 --- a/core/src/test/java/org/elasticsearch/script/FileScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/FileScriptTests.java @@ -54,7 +54,7 @@ public class FileScriptTests extends ESTestCase { .put("script.engine." + MockScriptEngine.NAME + ".file.aggs", false).build(); ScriptService scriptService = makeScriptService(settings); Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); } public void testAllOpsDisabled() throws Exception { @@ -68,7 +68,7 @@ public class FileScriptTests extends ESTestCase { Script script = new Script("script1", ScriptService.ScriptType.FILE, MockScriptEngine.NAME, null); for (ScriptContext context : ScriptContext.Standard.values()) { try { - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, Collections.emptyMap()); fail(context.getKey() + " script should have been rejected"); } catch(Exception e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [file], operation [" + context.getKey() + "] and lang [" + MockScriptEngine.NAME + "] are disabled")); diff --git a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java index 02fad319846..47adeabe02f 100644 --- a/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java +++ b/core/src/test/java/org/elasticsearch/script/NativeScriptTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.watcher.ResourceWatcherService; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -62,7 +63,7 @@ public class NativeScriptTests extends ESTestCase { ScriptService scriptService = injector.getInstance(ScriptService.class); ExecutableScript executable = scriptService.executable(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), - ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(executable.run().toString(), equalTo("test")); terminate(injector.getInstance(ThreadPool.class)); } @@ -88,7 +89,7 @@ public class NativeScriptTests extends ESTestCase { for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) { assertThat(scriptService.compile(new Script("my", ScriptType.INLINE, NativeScriptEngineService.NAME, null), scriptContext, - contextAndHeaders), notNullValue()); + contextAndHeaders, Collections.emptyMap()), notNullValue()); } } @@ -110,4 +111,4 @@ public class NativeScriptTests extends ESTestCase { return "test"; } } -} \ No newline at end of file +} diff --git a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java index 
0edaedbb28e..019eb7c74a0 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptContextTests.java @@ -58,7 +58,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); @@ -71,16 +71,16 @@ public class ScriptContextTests extends ESTestCase { ScriptService scriptService = makeScriptService(); Script script = new Script("1", ScriptService.ScriptType.INLINE, MockScriptEngine.NAME, null); try { - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (ScriptException e) { assertTrue(e.getMessage(), e.getMessage().contains("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [" + MockScriptEngine.NAME + "] are disabled")); } // still works for other script contexts - assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS, contextAndHeaders)); - assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders)); - assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders)); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.AGGS, contextAndHeaders, Collections.emptyMap())); + assertNotNull(scriptService.compile(script, ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap())); + assertNotNull(scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"), contextAndHeaders, Collections.emptyMap())); } public void testUnknownPluginScriptContext() throws Exception { @@ -89,7 +89,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders); + scriptService.compile(script, new ScriptContext.Plugin(PLUGIN_NAME, "unknown"), contextAndHeaders, Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [" + PLUGIN_NAME + "_unknown] not supported")); @@ -109,7 +109,7 @@ public class ScriptContextTests extends ESTestCase { for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) { try { Script script = new Script("1", scriptType, MockScriptEngine.NAME, null); - scriptService.compile(script, context, contextAndHeaders); + scriptService.compile(script, context, contextAndHeaders, 
Collections.emptyMap()); fail("script compilation should have been rejected"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("script context [test] not supported")); diff --git a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java index 3e476d2bebb..0afd72dab2b 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptModesTests.java @@ -252,7 +252,7 @@ public class ScriptModesTests extends ESTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return null; } diff --git a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java index 23cada02c6c..ab325e9e0c9 100644 --- a/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java +++ b/core/src/test/java/org/elasticsearch/script/ScriptServiceTests.java @@ -33,6 +33,7 @@ import org.junit.Before; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -48,7 +49,7 @@ import static org.hamcrest.Matchers.sameInstance; public class ScriptServiceTests extends ESTestCase { private ResourceWatcherService resourceWatcherService; - private Set scriptEngineServices; + private ScriptEngineService scriptEngineService; private Map scriptEnginesByLangMap; private ScriptContextRegistry scriptContextRegistry; private ScriptContext[] scriptContexts; @@ -72,8 +73,8 @@ public class ScriptServiceTests extends ESTestCase { .put("path.conf", genericConfigFolder) .build(); resourceWatcherService = new ResourceWatcherService(baseSettings, null); - scriptEngineServices = newHashSet(new TestEngineService()); - scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(scriptEngineServices); + scriptEngineService = new TestEngineService(); + scriptEnginesByLangMap = ScriptModesTests.buildScriptEnginesByLangMap(Collections.singleton(scriptEngineService)); //randomly register custom script contexts int randomInt = randomIntBetween(0, 3); //prevent duplicates using map @@ -100,7 +101,7 @@ public class ScriptServiceTests extends ESTestCase { private void buildScriptService(Settings additionalSettings) throws IOException { Settings finalSettings = Settings.builder().put(baseSettings).put(additionalSettings).build(); Environment environment = new Environment(finalSettings); - scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) { + scriptService = new ScriptService(finalSettings, environment, Collections.singleton(scriptEngineService), resourceWatcherService, scriptContextRegistry) { @Override String getScriptFromIndex(String scriptLang, String id, HasContextAndHeaders headersContext) { //mock the script that gets retrieved from an index @@ -131,7 +132,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly processed"); CompiledScript compiledScript = scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), - ScriptContext.Standard.SEARCH, contextAndHeaders); + ScriptContext.Standard.SEARCH, contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file")); 
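Editorial note: every ScriptService.compile/executable/search call in this file gains a trailing params map, mirroring the new ScriptEngineService.compile(String, Map) signature shown in the ScriptModesTests hunk above. A sketch of a mock engine after the change; the map's type parameters and the class name are assumptions here, while the "compiled_" + script behavior is taken from TestEngineService in this diff:

```java
import java.util.Map;

// Minimal mock engine illustrating the two-argument compile signature.
class MockCompileEngine {
    Object compile(String script, Map<String, String> params) {
        // Compile-time parameters now travel with the compile call itself;
        // call sites with nothing to pass supply Collections.emptyMap().
        return "compiled_" + script;
    }
}
```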
logger.info("--> delete both files"); @@ -142,7 +143,7 @@ public class ScriptServiceTests extends ESTestCase { logger.info("--> verify that file with extension was correctly removed"); try { scriptService.compile(new Script("test_script", ScriptType.FILE, "test", null), ScriptContext.Standard.SEARCH, - contextAndHeaders); + contextAndHeaders, Collections.emptyMap()); fail("the script test_script should no longer exist"); } catch (IllegalArgumentException ex) { assertThat(ex.getMessage(), containsString("Unable to find on disk file script [test_script] using lang [test]")); @@ -153,9 +154,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -163,9 +164,9 @@ public class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); CompiledScript compiledScript1 = scriptService.compile(new Script("script", ScriptType.INLINE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("script", ScriptType.INLINE, "test2", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -174,9 +175,9 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(Settings.EMPTY); createFileScripts("test"); CompiledScript compiledScript1 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); CompiledScript compiledScript2 = scriptService.compile(new Script("file_script", ScriptType.FILE, "test2", null), - randomFrom(scriptContexts), contextAndHeaders); + randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertThat(compiledScript1.compiled(), sameInstance(compiledScript2.compiled())); } @@ -225,13 +226,11 @@ public class ScriptServiceTests extends ESTestCase { } while (scriptContextSettings.containsKey(scriptContext)); scriptContextSettings.put(scriptContext, randomFrom(ScriptMode.values())); } - int numEngineSettings = randomIntBetween(0, 10); + int numEngineSettings = randomIntBetween(0, ScriptType.values().length * scriptContexts.length); Map engineSettings = new HashMap<>(); for (int i = 0; i < numEngineSettings; i++) { String settingKey; do { - ScriptEngineService[] scriptEngineServices = this.scriptEngineServices.toArray(new ScriptEngineService[this.scriptEngineServices.size()]); - ScriptEngineService scriptEngineService = randomFrom(scriptEngineServices); ScriptType scriptType = randomFrom(ScriptType.values()); ScriptContext scriptContext = randomFrom(this.scriptContexts); 
settingKey = scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey(); @@ -288,40 +287,38 @@ public class ScriptServiceTests extends ESTestCase { buildScriptService(builder.build()); createFileScripts("groovy", "expression", "mustache", "test"); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (ScriptType scriptType : ScriptType.values()) { - //make sure file scripts have a different name than inline ones. - //Otherwise they are always considered file ones as they can be found in the static cache. - String script = scriptType == ScriptType.FILE ? "file_script" : "script"; - for (ScriptContext scriptContext : this.scriptContexts) { - //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings - ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey()); - if (scriptMode == null) { - scriptMode = scriptContextSettings.get(scriptContext.getKey()); - } - if (scriptMode == null) { - scriptMode = scriptSourceSettings.get(scriptType); - } - if (scriptMode == null) { - scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); - } + for (ScriptType scriptType : ScriptType.values()) { + //make sure file scripts have a different name than inline ones. + //Otherwise they are always considered file ones as they can be found in the static cache. + String script = scriptType == ScriptType.FILE ? "file_script" : "script"; + for (ScriptContext scriptContext : this.scriptContexts) { + //fallback mechanism: 1) engine specific settings 2) op based settings 3) source based settings + ScriptMode scriptMode = engineSettings.get(scriptEngineService.types()[0] + "." + scriptType + "." + scriptContext.getKey()); + if (scriptMode == null) { + scriptMode = scriptContextSettings.get(scriptContext.getKey()); + } + if (scriptMode == null) { + scriptMode = scriptSourceSettings.get(scriptType); + } + if (scriptMode == null) { + scriptMode = DEFAULT_SCRIPT_MODES.get(scriptType); + } - for (String lang : scriptEngineService.types()) { - switch (scriptMode) { - case ON: + for (String lang : scriptEngineService.types()) { + switch (scriptMode) { + case ON: + assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case OFF: + assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); + break; + case SANDBOX: + if (scriptEngineService.sandboxed()) { assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case OFF: + } else { assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); - break; - case SANDBOX: - if (scriptEngineService.sandboxed()) { - assertCompileAccepted(lang, script, scriptType, scriptContext, contextAndHeaders); - } else { - assertCompileRejected(lang, script, scriptType, scriptContext, contextAndHeaders); - } - break; - } + } + break; } } } @@ -338,15 +335,13 @@ public class ScriptServiceTests extends ESTestCase { unknownContext = randomAsciiOfLength(randomIntBetween(1, 30)); } while(scriptContextRegistry.isSupportedContext(new ScriptContext.Plugin(pluginName, unknownContext))); - for (ScriptEngineService scriptEngineService : scriptEngineServices) { - for (String type : scriptEngineService.types()) { - try { - scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( - pluginName, unknownContext), contextAndHeaders); - fail("script compilation should have been rejected"); - } 
catch(IllegalArgumentException e) { - assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); - } + for (String type : scriptEngineService.types()) { + try { + scriptService.compile(new Script("test", randomFrom(ScriptType.values()), type, null), new ScriptContext.Plugin( + pluginName, unknownContext), contextAndHeaders, Collections.emptyMap()); + fail("script compilation should have been rejected"); + } catch(IllegalArgumentException e) { + assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); } } } @@ -354,20 +349,20 @@ public class ScriptServiceTests extends ESTestCase { public void testCompileCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testExecutableCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testSearchCountedInCompilationStats() throws IOException { buildScriptService(Settings.EMPTY); - scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts)); + scriptService.search(null, new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -377,7 +372,7 @@ public class ScriptServiceTests extends ESTestCase { int numberOfCompilations = randomIntBetween(1, 1024); for (int i = 0; i < numberOfCompilations; i++) { scriptService - .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + .compile(new Script(i + " + " + i, ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); } assertEquals(numberOfCompilations, scriptService.stats().getCompilations()); } @@ -387,8 +382,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -396,14 +391,14 @@ public 
class ScriptServiceTests extends ESTestCase { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); createFileScripts("test"); - scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("file_script", ScriptType.FILE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } public void testIndexedScriptCountedInCompilationStats() throws IOException { ContextAndHeaderHolder contextAndHeaders = new ContextAndHeaderHolder(); buildScriptService(Settings.EMPTY); - scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.compile(new Script("script", ScriptType.INDEXED, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(1L, scriptService.stats().getCompilations()); } @@ -412,8 +407,8 @@ public class ScriptServiceTests extends ESTestCase { Settings.Builder builder = Settings.builder(); builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING, 1); buildScriptService(builder.build()); - scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); - scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders); + scriptService.executable(new Script("1+1", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); + scriptService.executable(new Script("2+2", ScriptType.INLINE, "test", null), randomFrom(scriptContexts), contextAndHeaders, Collections.emptyMap()); assertEquals(2L, scriptService.stats().getCompilations()); assertEquals(1L, scriptService.stats().getCacheEvictions()); } @@ -429,7 +424,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { try { - scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders); + scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(ScriptException e) { //all good @@ -438,7 +433,7 @@ public class ScriptServiceTests extends ESTestCase { private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, HasContextAndHeaders contextAndHeaders) { - assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders), notNullValue()); + assertThat(scriptService.compile(new Script(script, scriptType, lang, null), scriptContext, contextAndHeaders, Collections.emptyMap()), notNullValue()); } public static class TestEngineService implements ScriptEngineService { @@ -459,7 +454,7 @@ public class ScriptServiceTests extends ESTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { return "compiled_" + script; } diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java 
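The recurring change above threads a compile-time `Map<String, Object> params` argument through `ScriptService.compile`/`executable`/`search` and down into `ScriptEngineService.compile`. A minimal sketch of an engine that honors the new argument (the class and the "verbose" option below are hypothetical; `TestEngineService` in this patch simply ignores the map):

---------------------------------------------------------------------------
import java.util.Map;

public class SketchEngine {
    // Mirrors the new ScriptEngineService#compile(String, Map) shape.
    public Object compile(String script, Map<String, Object> params) {
        // A real engine could let callers tweak compilation, e.g. via a debug flag.
        boolean verbose = Boolean.TRUE.equals(params.get("verbose")); // hypothetical option
        return (verbose ? "compiled[verbose]_" : "compiled_") + script;
    }
}
---------------------------------------------------------------------------

Callers with no options to pass supply `Collections.emptyMap()`, which is why that literal shows up throughout the tests above.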
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
index 0a660b85374..aad2c9bb3ed 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/BooleanTermsIT.java
@@ -99,20 +99,22 @@ public class BooleanTermsIT extends ESIntegTestCase {
         final int bucketCount = numSingleFalses > 0 && numSingleTrues > 0 ? 2 : numSingleFalses + numSingleTrues > 0 ? 1 : 0;
         assertThat(terms.getBuckets().size(), equalTo(bucketCount));

-        Terms.Bucket bucket = terms.getBucketByKey("0");
+        Terms.Bucket bucket = terms.getBucketByKey("false");
         if (numSingleFalses == 0) {
             assertNull(bucket);
         } else {
             assertNotNull(bucket);
             assertEquals(numSingleFalses, bucket.getDocCount());
+            assertEquals("false", bucket.getKeyAsString());
         }

-        bucket = terms.getBucketByKey("1");
+        bucket = terms.getBucketByKey("true");
         if (numSingleTrues == 0) {
             assertNull(bucket);
         } else {
             assertNotNull(bucket);
             assertEquals(numSingleTrues, bucket.getDocCount());
+            assertEquals("true", bucket.getKeyAsString());
         }
     }
@@ -131,20 +133,22 @@ public class BooleanTermsIT extends ESIntegTestCase {
         final int bucketCount = numMultiFalses > 0 && numMultiTrues > 0 ? 2 : numMultiFalses + numMultiTrues > 0 ? 1 : 0;
         assertThat(terms.getBuckets().size(), equalTo(bucketCount));

-        Terms.Bucket bucket = terms.getBucketByKey("0");
+        Terms.Bucket bucket = terms.getBucketByKey("false");
         if (numMultiFalses == 0) {
             assertNull(bucket);
         } else {
             assertNotNull(bucket);
             assertEquals(numMultiFalses, bucket.getDocCount());
+            assertEquals("false", bucket.getKeyAsString());
         }

-        bucket = terms.getBucketByKey("1");
+        bucket = terms.getBucketByKey("true");
         if (numMultiTrues == 0) {
             assertNull(bucket);
         } else {
             assertNotNull(bucket);
             assertEquals(numMultiTrues, bucket.getDocCount());
+            assertEquals("true", bucket.getKeyAsString());
         }
     }
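With this change the terms aggregation keys boolean buckets by the strings "true" and "false" rather than "1" and "0", so lookups and key assertions use the string form. A minimal sketch of the lookup pattern (names taken from the test above):

---------------------------------------------------------------------------
Terms terms = response.getAggregations().get("terms");
Terms.Bucket falseBucket = terms.getBucketByKey("false"); // was getBucketByKey("0")
if (falseBucket != null) {
    assertEquals("false", falseBucket.getKeyAsString());
}
---------------------------------------------------------------------------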
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
index b6611a956af..540420c21bc 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/ChildrenIT.java
@@ -18,12 +18,15 @@
  */
 package org.elasticsearch.search.aggregations.bucket;

+import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.SearchHit;
+import org.elasticsearch.search.aggregations.AggregationBuilder;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.aggregations.bucket.children.Children;
 import org.elasticsearch.search.aggregations.bucket.terms.Terms;
 import org.elasticsearch.search.aggregations.metrics.sum.Sum;
@@ -392,6 +395,65 @@ public class ChildrenIT extends ESIntegTestCase {
         assertThat(terms.getBuckets().get(0).getDocCount(), equalTo(1l));
     }

+    public void testPostCollectAllLeafReaders() throws Exception {
+        // The 'towns' and 'parent_names' aggs operate on parent docs; if child docs land in different segments,
+        // those segments also need to be evaluated in the post-collect phase.
+        //
+        // Previously we only evaluated segments that yielded matches in the 'towns' and 'parent_names' aggs,
+        // which caused us to miss child docs in segments that had no parent matches.
+        assertAcked(
+                prepareCreate("index")
+                        .addMapping("parentType", "name", "type=string,index=not_analyzed", "town", "type=string,index=not_analyzed")
+                        .addMapping("childType", "_parent", "type=parentType", "name", "type=string,index=not_analyzed", "age", "type=integer")
+        );
+        List<IndexRequestBuilder> requests = new ArrayList<>();
+        requests.add(client().prepareIndex("index", "parentType", "1").setSource("name", "Bob", "town", "Memphis"));
+        requests.add(client().prepareIndex("index", "parentType", "2").setSource("name", "Alice", "town", "Chicago"));
+        requests.add(client().prepareIndex("index", "parentType", "3").setSource("name", "Bill", "town", "Chicago"));
+        requests.add(client().prepareIndex("index", "childType", "1").setSource("name", "Jill", "age", 5).setParent("1"));
+        requests.add(client().prepareIndex("index", "childType", "2").setSource("name", "Joey", "age", 3).setParent("1"));
+        requests.add(client().prepareIndex("index", "childType", "3").setSource("name", "John", "age", 2).setParent("2"));
+        requests.add(client().prepareIndex("index", "childType", "4").setSource("name", "Betty", "age", 6).setParent("3"));
+        requests.add(client().prepareIndex("index", "childType", "5").setSource("name", "Dan", "age", 1).setParent("3"));
+        indexRandom(true, requests);
+
+        SearchResponse response = client().prepareSearch("index")
+                .setSize(0)
+                .addAggregation(AggregationBuilders.terms("towns").field("town")
+                        .subAggregation(AggregationBuilders.terms("parent_names").field("name")
+                                .subAggregation(AggregationBuilders.children("child_docs").childType("childType"))
+                        )
+                )
+                .get();
+
+        Terms towns = response.getAggregations().get("towns");
+        assertThat(towns.getBuckets().size(), equalTo(2));
+        assertThat(towns.getBuckets().get(0).getKeyAsString(), equalTo("Chicago"));
+        assertThat(towns.getBuckets().get(0).getDocCount(), equalTo(2L));
+
+        Terms parents = towns.getBuckets().get(0).getAggregations().get("parent_names");
+        assertThat(parents.getBuckets().size(), equalTo(2));
+        assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Alice"));
+        assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L));
+        Children children = parents.getBuckets().get(0).getAggregations().get("child_docs");
+        assertThat(children.getDocCount(), equalTo(1L));
+
+        assertThat(parents.getBuckets().get(1).getKeyAsString(), equalTo("Bill"));
+        assertThat(parents.getBuckets().get(1).getDocCount(), equalTo(1L));
+        children = parents.getBuckets().get(1).getAggregations().get("child_docs");
+        assertThat(children.getDocCount(), equalTo(2L));
+
+        assertThat(towns.getBuckets().get(1).getKeyAsString(), equalTo("Memphis"));
+        assertThat(towns.getBuckets().get(1).getDocCount(), equalTo(1L));
+        parents = towns.getBuckets().get(1).getAggregations().get("parent_names");
+        assertThat(parents.getBuckets().size(), equalTo(1));
+        assertThat(parents.getBuckets().get(0).getKeyAsString(), equalTo("Bob"));
+        assertThat(parents.getBuckets().get(0).getDocCount(), equalTo(1L));
+        children = parents.getBuckets().get(0).getAggregations().get("child_docs");
+        assertThat(children.getDocCount(), equalTo(2L));
+    }
+
     private static final class Control {

         final String category;
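The fix the comment describes boils down to visiting every leaf (segment) reader at post-collection time, not just the segments that produced parent matches. A self-contained Lucene-level illustration of the "visit all leaves" idea (the helper and term are hypothetical; the real ParentToChildrenAggregator logic is more involved):

---------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;

final class PostCollectSketch {
    // Counts docs containing the term in *every* segment, including segments
    // that never produced a top-level match earlier in the search.
    static long countAcrossAllLeaves(IndexReader reader, Term term) throws IOException {
        long count = 0;
        for (LeafReaderContext leaf : reader.leaves()) {
            PostingsEnum docs = leaf.reader().postings(term); // null if the segment lacks the term
            if (docs == null) {
                continue;
            }
            while (docs.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                count++;
            }
        }
        return count;
    }
}
---------------------------------------------------------------------------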
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
index 9a1d498ad6b..97cd659a1d9 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java
@@ -1429,7 +1429,7 @@ public class DateHistogramIT extends ESIntegTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -1555,7 +1555,7 @@ public class DateHistogramIT extends ESIntegTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
index a1f4b20dc1c..349b61fc37e 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedIT.java
@@ -423,10 +423,10 @@ public class NestedIT extends ESIntegTestCase {
         Terms startDate = response.getAggregations().get("startDate");
         assertThat(startDate.getBuckets().size(), equalTo(2));
-        Terms.Bucket bucket = startDate.getBucketByKey("1414800000000"); // 2014-11-01T00:00:00.000Z
+        Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z");
         assertThat(bucket.getDocCount(), equalTo(1l));
         Terms endDate = bucket.getAggregations().get("endDate");
-        bucket = endDate.getBucketByKey("1417305600000"); // 2014-11-30T00:00:00.000Z
+        bucket = endDate.getBucketByKey("2014-11-30T00:00:00.000Z");
         assertThat(bucket.getDocCount(), equalTo(1l));
         Terms period = bucket.getAggregations().get("period");
         bucket = period.getBucketByKey("2014-11");
@@ -440,10 +440,10 @@ public class NestedIT extends ESIntegTestCase {
         Terms tags = nestedTags.getAggregations().get("tag");
         assertThat(tags.getBuckets().size(), equalTo(0)); // and this must be empty
-        bucket = startDate.getBucketByKey("1417392000000"); // 2014-12-01T00:00:00.000Z
+        bucket = startDate.getBucketByKey("2014-12-01T00:00:00.000Z");
         assertThat(bucket.getDocCount(), equalTo(1l));
         endDate = bucket.getAggregations().get("endDate");
-        bucket = endDate.getBucketByKey("1419984000000"); // 2014-12-31T00:00:00.000Z
+        bucket = endDate.getBucketByKey("2014-12-31T00:00:00.000Z");
         assertThat(bucket.getDocCount(), equalTo(1l));
         period = bucket.getAggregations().get("period");
         bucket = period.getBucketByKey("2014-12");
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
index ac146706eb5..db02d6ccb03 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgIT.java
@@ -364,7 +364,7 @@ public class AvgIT extends AbstractNumericTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -500,7 +500,7 @@ public class AvgIT extends AbstractNumericTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -585,4 +585,4 @@ public class AvgIT extends AbstractNumericTestCase {
         public void scriptRemoved(CompiledScript script) {
         }
     }
-}
\ No newline at end of file
+}
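As with the boolean terms change earlier, terms buckets over date fields are now addressed by their formatted key rather than the epoch-millis string, which lets the human-readable date that used to live in a comment become the key itself:

---------------------------------------------------------------------------
// was: startDate.getBucketByKey("1414800000000"); // 2014-11-01T00:00:00.000Z
Terms.Bucket bucket = startDate.getBucketByKey("2014-11-01T00:00:00.000Z");
---------------------------------------------------------------------------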
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java
index d87de000108..2c27bde57dc 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/SumIT.java
@@ -359,7 +359,7 @@ public class SumIT extends AbstractNumericTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -497,7 +497,7 @@ public class SumIT extends AbstractNumericTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -583,4 +583,4 @@ public class SumIT extends AbstractNumericTestCase {
         public void scriptRemoved(CompiledScript script) {
         }
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
index fde7256ad01..c4611546493 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountIT.java
@@ -244,7 +244,7 @@ public class ValueCountIT extends ESIntegTestCase {
     }

     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, Object> params) {
         return script;
     }
@@ -330,4 +330,4 @@ public class ValueCountIT extends ESIntegTestCase {
         public void scriptRemoved(CompiledScript script) {
         }
     }
-}
\ No newline at end of file
+}
diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
index 90d4437fcea..207b626409b 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java
@@ -57,6 +57,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
 import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
 import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative;
 import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.movingAvg;
+import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
diff --git a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
index 69c4bbdbd11..1f421292371 100644
--- a/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
+++ b/core/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingIT.java
@@ -61,7 +61,7 @@ public class SearchWhileRelocatingIT extends ESIntegTestCase {
         final int numShards = between(1, 20);
         client().admin().indices().prepareCreate("test")
                 .setSettings(settingsBuilder().put("index.number_of_shards", numShards).put("index.number_of_replicas", numberOfReplicas))
-                .addMapping("type1", "loc", "type=geo_point", "test", "type=string").execute().actionGet();
+                .addMapping("type", "loc", "type=geo_point", "test", "type=string").execute().actionGet();
         ensureGreen();
         List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
         final int numDocs = between(10, 20);
diff --git a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
index 891ebf7e2d1..b80810fc6d5 100644
--- a/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java
@@ -34,7 +34,11 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.query.AbstractQueryTestCase;
 import org.elasticsearch.index.query.EmptyQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
@@ -47,7 +51,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder.InnerHit;
 import org.elasticsearch.search.fetch.source.FetchSourceContext;
-import org.elasticsearch.search.highlight.HighlightBuilder;
+import org.elasticsearch.search.highlight.HighlightBuilderTests;
 import org.elasticsearch.search.rescore.RescoreBuilder;
 import org.elasticsearch.search.sort.SortBuilders;
 import org.elasticsearch.search.sort.SortOrder;
@@ -251,8 +255,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
             }
         }
         if (randomBoolean()) {
-            // NORELEASE need a random highlight builder method
-            builder.highlighter(new HighlightBuilder().field(randomAsciiOfLengthBetween(5, 20)));
+            builder.highlighter(HighlightBuilderTests.randomHighlighterBuilder());
         }
         if (randomBoolean()) {
             // NORELEASE need a random suggest builder method
diff --git a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
index 4be2b36fbe6..63c142f1e74 100644
--- a/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/child/ChildQuerySearchIT.java
@@ -1176,7 +1176,7 @@ public class ChildQuerySearchIT extends ESIntegTestCase {
                     .endObject().endObject()).get();
             fail();
         } catch (IllegalArgumentException e) {
-            assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]"));
+            assertThat(e.toString(), containsString("The _parent field's type option can't be changed: [null]->[parent]"));
         }
     }
diff --git a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
index 847e03e5c44..c0cc17fc43d 100644
--- a/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
+++ b/core/src/test/java/org/elasticsearch/search/geo/GeoShapeQueryTests.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.search.geo;

 import com.spatial4j.core.shape.Rectangle;
+
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.common.geo.ShapeRelation;
@@ -47,7 +48,10 @@ import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomPoint;
 import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomRectangle;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
-import static org.hamcrest.Matchers.*;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.nullValue;

 public class GeoShapeQueryTests extends ESSingleNodeTestCase {
     public void testNullShape() throws Exception {
@@ -396,6 +400,12 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
                 .setPostFilter(filter).get();
         assertSearchResponse(result);
         assertHitCount(result, 1);
+        // no shape
+        filter = QueryBuilders.geoShapeQuery("location", ShapeBuilders.newGeometryCollection());
+        result = client().prepareSearch("test").setTypes("type").setQuery(QueryBuilders.matchAllQuery())
+                .setPostFilter(filter).get();
+        assertSearchResponse(result);
+        assertHitCount(result, 0);
     }

     public void testPointsOnly() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
index 5a8d7c0150a..05b999a9196 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/CustomHighlighter.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.search.highlight;

-import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.mapper.FieldMapper;

@@ -52,12 +51,12 @@ public class CustomHighlighter implements Highlighter {
         }

         List<Text> responses = new ArrayList<>();
-        responses.add(new StringText(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(),
+        responses.add(new Text(String.format(Locale.ENGLISH, "standard response for %s at position %s", field.field(),
                 cacheEntry.position)));

         if (field.fieldOptions().options() != null) {
             for (Map.Entry<String, Object> entry : field.fieldOptions().options().entrySet()) {
-                responses.add(new StringText("field:" + entry.getKey() + ":" + entry.getValue()));
+                responses.add(new Text("field:" + entry.getKey() + ":" + entry.getValue()));
             }
         }
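A mechanical rename runs through this and the following files: the StringText implementation is gone and org.elasticsearch.common.text.Text is constructed directly. The migration is a one-liner wherever it appears:

---------------------------------------------------------------------------
import org.elasticsearch.common.text.Text;

Text type = new Text("_type"); // previously: new StringText("_type")
---------------------------------------------------------------------------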
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java
index 75fc9f98156..2ac5895c9eb 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlightBuilderTests.java
@@ -19,10 +19,10 @@

 package org.elasticsearch.search.highlight;

-import org.elasticsearch.common.ParseFieldMatcher;
-import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.ParseFieldMatcher;
+import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -52,8 +52,8 @@ import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.TermQueryBuilder;
 import org.elasticsearch.index.query.TermQueryParser;
 import org.elasticsearch.indices.query.IndicesQueriesRegistry;
-import org.elasticsearch.search.highlight.HighlightBuilder;
 import org.elasticsearch.search.highlight.HighlightBuilder.Field;
+import org.elasticsearch.search.highlight.HighlightBuilder.Order;
 import org.elasticsearch.search.highlight.SearchContextHighlight.FieldOptions;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.IndexSettingsModule;
@@ -159,7 +159,8 @@ public class HighlightBuilderTests extends ESTestCase {
             XContentParser parser = XContentHelper.createParser(builder.bytes());
             context.reset(parser);
-            HighlightBuilder secondHighlightBuilder = HighlightBuilder.fromXContent(context);
+            parser.nextToken();
+            HighlightBuilder secondHighlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
             assertNotSame(highlightBuilder, secondHighlightBuilder);
             assertEquals(highlightBuilder, secondHighlightBuilder);
             assertEquals(highlightBuilder.hashCode(), secondHighlightBuilder.hashCode());
@@ -179,7 +180,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage());
@@ -196,7 +197,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("cannot parse array with name [bad_fieldname]", e.getMessage());
@@ -216,7 +217,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage());
@@ -233,7 +234,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("unexpected fieldname [bad_fieldname]", e.getMessage());
@@ -253,7 +254,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage());
@@ -270,7 +271,7 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("expected a parsing exception");
         } catch (ParsingException e) {
             assertEquals("cannot parse object with name [bad_fieldname]", e.getMessage());
@@ -354,7 +355,7 @@ public class HighlightBuilderTests extends ESTestCase {
         XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement);

         context.reset(parser);
-        HighlightBuilder highlightBuilder = HighlightBuilder.fromXContent(context);
+        HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
         assertArrayEquals("setting tags_schema 'styled' should alter pre_tags", HighlightBuilder.DEFAULT_STYLED_PRE_TAG,
                 highlightBuilder.preTags());
         assertArrayEquals("setting tags_schema 'styled' should alter post_tags", HighlightBuilder.DEFAULT_STYLED_POST_TAGS,
@@ -366,7 +367,7 @@ public class HighlightBuilderTests extends ESTestCase {
         parser = XContentFactory.xContent(highlightElement).createParser(highlightElement);

         context.reset(parser);
-        highlightBuilder = HighlightBuilder.fromXContent(context);
+        highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
         assertArrayEquals("setting tags_schema 'default' should alter pre_tags", HighlightBuilder.DEFAULT_PRE_TAGS,
                 highlightBuilder.preTags());
         assertArrayEquals("setting tags_schema 'default' should alter post_tags", HighlightBuilder.DEFAULT_POST_TAGS,
@@ -379,13 +380,66 @@ public class HighlightBuilderTests extends ESTestCase {
         context.reset(parser);

         try {
-            highlightBuilder = HighlightBuilder.fromXContent(context);
+            HighlightBuilder.PROTOTYPE.fromXContent(context);
             fail("setting unknown tag schema should throw exception");
         } catch (IllegalArgumentException e) {
             assertEquals("Unknown tag schema [somthing_else]", e.getMessage());
         }
     }

+    /**
+     * test parsing empty highlight or empty fields blocks
+     */
+    public void testParsingEmptyStructure() throws IOException {
+        QueryParseContext context = new QueryParseContext(indicesQueriesRegistry);
+        context.parseFieldMatcher(new ParseFieldMatcher(Settings.EMPTY));
+        String highlightElement = "{ }";
+        XContentParser parser = XContentFactory.xContent(highlightElement).createParser(highlightElement);
+
+        context.reset(parser);
+        HighlightBuilder highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+        assertEquals("expected plain HighlightBuilder", new HighlightBuilder(), highlightBuilder);
+
+        highlightElement = "{ \"fields\" : { } }";
+        parser = XContentFactory.xContent(highlightElement).createParser(highlightElement);
+
+        context.reset(parser);
+        highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+        assertEquals("defining no field should return plain HighlightBuilder", new HighlightBuilder(), highlightBuilder);
+
+        highlightElement = "{ \"fields\" : { \"foo\" : { } } }";
+        parser = XContentFactory.xContent(highlightElement).createParser(highlightElement);
+
+        context.reset(parser);
+        highlightBuilder = HighlightBuilder.PROTOTYPE.fromXContent(context);
+        assertEquals("expected HighlightBuilder with field", new HighlightBuilder().field(new Field("foo")), highlightBuilder);
+    }
+
+    /**
+     * test ordinals of {@link Order}, since serialization depends on it
+     */
+    public void testValidOrderOrdinals() {
+        assertThat(Order.NONE.ordinal(), equalTo(0));
+        assertThat(Order.SCORE.ordinal(), equalTo(1));
+    }
+
+    public void testOrderSerialization() throws Exception {
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            Order.NONE.writeTo(out);
+            try (StreamInput in = StreamInput.wrap(out.bytes())) {
+                assertThat(in.readVInt(), equalTo(0));
+            }
+        }
+
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            Order.SCORE.writeTo(out);
+            try (StreamInput in = StreamInput.wrap(out.bytes())) {
+                assertThat(in.readVInt(), equalTo(1));
+            }
+        }
+    }
+
     protected static XContentBuilder toXContent(HighlightBuilder highlight, XContentType contentType) throws IOException {
         XContentBuilder builder = XContentFactory.contentBuilder(contentType);
         if (randomBoolean()) {
@@ -396,9 +450,9 @@ public class HighlightBuilderTests extends ESTestCase {
     }

     /**
-     * create random shape that is put under test
+     * create random highlight builder that is put under test
      */
-    private static HighlightBuilder randomHighlighterBuilder() {
+    public static HighlightBuilder randomHighlighterBuilder() {
         HighlightBuilder testHighlighter = new HighlightBuilder();
         setRandomCommonOptions(testHighlighter);
         testHighlighter.useExplicitFieldOrder(randomBoolean());
@@ -457,7 +511,12 @@ public class HighlightBuilderTests extends ESTestCase {
             highlightBuilder.highlightQuery(highlightQuery);
         }
         if (randomBoolean()) {
-            highlightBuilder.order(randomAsciiOfLengthBetween(1, 10));
+            if (randomBoolean()) {
+                highlightBuilder.order(randomFrom(Order.values()));
+            } else {
+                // also test the string setter
+                highlightBuilder.order(randomFrom(Order.values()).toString());
+            }
         }
         if (randomBoolean()) {
             highlightBuilder.highlightFilter(randomBoolean());
@@ -526,7 +585,11 @@ public class HighlightBuilderTests extends ESTestCase {
             highlightBuilder.highlightQuery(new TermQueryBuilder(randomAsciiOfLengthBetween(11, 20), randomAsciiOfLengthBetween(11, 20)));
             break;
         case 8:
-            highlightBuilder.order(randomAsciiOfLengthBetween(11, 20));
+            if (highlightBuilder.order() == Order.NONE) {
+                highlightBuilder.order(Order.SCORE);
+            } else {
+                highlightBuilder.order(Order.NONE);
+            }
             break;
         case 9:
             highlightBuilder.highlightFilter(toggleOrSet(highlightBuilder.highlightFilter()));
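The two Order tests above pin down the wire format: the enum is serialized as its ordinal in a variable-length int, so NONE=0 and SCORE=1 must never be reordered. A self-contained sketch of that contract, using Lucene's DataOutput/DataInput as a stand-in for Elasticsearch's StreamOutput/StreamInput:

---------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;

enum OrderSketch {
    NONE, SCORE; // ordinal is the wire value: NONE=0, SCORE=1

    void writeTo(DataOutput out) throws IOException {
        out.writeVInt(ordinal());
    }

    static OrderSketch readFrom(DataInput in) throws IOException {
        int ordinal = in.readVInt();
        if (ordinal < 0 || ordinal >= values().length) {
            throw new IOException("unknown Order ordinal [" + ordinal + "]");
        }
        return values()[ordinal];
    }
}
---------------------------------------------------------------------------

Because the ordinal is the wire value, a new constant is only safe to add at the end of the enum.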
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
index 8a2506ec3ad..4063ec81a28 100644
--- a/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchIT.java
@@ -58,7 +58,6 @@ import static org.elasticsearch.index.query.QueryBuilders.fuzzyQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchPhrasePrefixQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchPhraseQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
-import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
 import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
 import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
 import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
@@ -67,6 +66,7 @@ import static org.elasticsearch.index.query.QueryBuilders.regexpQuery;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.index.query.QueryBuilders.typeQuery;
 import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery;
+import static org.elasticsearch.index.query.QueryBuilders.existsQuery;
 import static org.elasticsearch.search.builder.SearchSourceBuilder.highlight;
 import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -1557,7 +1557,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                         .fragmenter("simple"))).get();

         assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));

         response = client().prepareSearch("test")
                 .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE))
@@ -1566,7 +1566,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
                         .fragmenter("span"))).get();

         assertHighlight(response, 0, "tags", 0, equalTo("this is a really long tag i would like to highlight"));
-        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));
+        assertHighlight(response, 0, "tags", 1, 2, equalTo("here is another one that is very long tag and has the tag token near the end"));

         assertFailures(client().prepareSearch("test")
                 .setQuery(QueryBuilders.matchQuery("tags", "long tag").type(MatchQuery.Type.PHRASE))
@@ -2062,7 +2062,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {

         searchResponse = client().search(searchRequest("test").source(source)).actionGet();

-        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog"));
+        assertHighlight(searchResponse, 0, "field2", 0, 1, equalTo("The quick brown fox jumps over the lazy quick dog"));
     }

     public void testPostingsHighlighterMultipleFields() throws Exception {
@@ -2471,7 +2471,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
         logger.info("--> highlighting and searching on field1");
         SearchSourceBuilder source = searchSource().query(boolQuery()
-                .should(constantScoreQuery(QueryBuilders.missingQuery("field1")))
+                .should(boolQuery().mustNot(QueryBuilders.existsQuery("field1")))
                 .should(matchQuery("field1", "test"))
                 .should(constantScoreQuery(queryStringQuery("field1:photo*"))))
                 .highlighter(highlight().field("field1"));
@@ -2501,7 +2501,9 @@ public class HighlighterSearchIT extends ESIntegTestCase {
         refresh();

         logger.info("--> highlighting and searching on field1");
-        SearchSourceBuilder source = searchSource().query(boolQuery().must(queryStringQuery("field1:photo*")).filter(missingQuery("field_null")))
+        SearchSourceBuilder source = searchSource().query(boolQuery()
+                .must(queryStringQuery("field1:photo*"))
+                .mustNot(existsQuery("field_null")))
                 .highlighter(highlight().field("field1"));
         SearchResponse searchResponse = client().prepareSearch("test").setSource(source).get();
         assertHighlight(searchResponse, 0, "field1", 0, 1, equalTo("The photography word will get highlighted"));
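missingQuery was removed from QueryBuilders in this change set; the two hunks above show the drop-in replacement, a bool query that must-not match an exists query on the same field:

---------------------------------------------------------------------------
// equivalent of the removed missingQuery("field_null")
QueryBuilder missing = QueryBuilders.boolQuery()
        .mustNot(QueryBuilders.existsQuery("field_null"));
---------------------------------------------------------------------------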
diff --git a/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java
new file mode 100644
index 00000000000..5156209d6f1
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/highlight/PlainHighlighterTests.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.highlight;
+
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.PhraseQuery;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.highlight.QueryScorer;
+import org.apache.lucene.util.LuceneTestCase;
+
+public class PlainHighlighterTests extends LuceneTestCase {
+
+    public void testHighlightPhrase() throws Exception {
+        Query query = new PhraseQuery.Builder()
+                .add(new Term("field", "foo"))
+                .add(new Term("field", "bar"))
+                .build();
+        QueryScorer queryScorer = new CustomQueryScorer(query);
+        org.apache.lucene.search.highlight.Highlighter highlighter = new org.apache.lucene.search.highlight.Highlighter(queryScorer);
+        String[] frags = highlighter.getBestFragments(new MockAnalyzer(random()), "field", "bar foo bar foo", 10);
+        assertArrayEquals(new String[] {"bar foo bar foo"}, frags);
+    }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
index cc631d5df2a..0525fd28db1 100644
--- a/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
+++ b/core/src/test/java/org/elasticsearch/search/internal/InternalSearchHitTests.java
@@ -21,7 +21,7 @@ package org.elasticsearch.search.internal;

 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.InputStreamStreamInput;
-import org.elasticsearch.common.text.StringText;
+import org.elasticsearch.common.text.Text;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.test.ESTestCase;

@@ -39,25 +39,25 @@ public class InternalSearchHitTests extends ESTestCase {
         SearchShardTarget target = new SearchShardTarget("_node_id", "_index", 0);

         Map<String, InternalSearchHits> innerHits = new HashMap<>();
-        InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit1 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         innerHit1.shardTarget(target);
-        InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerInnerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         innerInnerHit2.shardTarget(target);
         innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerInnerHit2}, 1, 1f));
         innerHit1.setInnerHits(innerHits);
-        InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         innerHit2.shardTarget(target);
-        InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit innerHit3 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         innerHit3.shardTarget(target);

         innerHits = new HashMap<>();
-        InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit hit1 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         innerHits.put("1", new InternalSearchHits(new InternalSearchHit[]{innerHit1, innerHit2}, 1, 1f));
         innerHits.put("2", new InternalSearchHits(new InternalSearchHit[]{innerHit3}, 1, 1f));
         hit1.shardTarget(target);
         hit1.setInnerHits(innerHits);

-        InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new StringText("_type"), null);
+        InternalSearchHit hit2 = new InternalSearchHit(0, "_id", new Text("_type"), null);
         hit2.shardTarget(target);

         InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[]{hit1, hit2}, 2, 1f);
diff --git a/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java
new file mode 100644
index 00000000000..83f6efaa150
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/profile/ProfileTests.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.document.StringField;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.LeafCollector;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.RandomApproximationQuery;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.TermQuery;
+import org.apache.lucene.search.TotalHitCountCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.TestUtil;
+import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+public class ProfileTests extends ESTestCase {
+
+    static Directory dir;
+    static IndexReader reader;
+    static ContextIndexSearcher searcher;
+
+    @BeforeClass
+    public static void before() throws IOException {
+        dir = newDirectory();
+        RandomIndexWriter w = new RandomIndexWriter(random(), dir);
+        final int numDocs = TestUtil.nextInt(random(), 1, 20);
+        for (int i = 0; i < numDocs; ++i) {
+            final int numHoles = random().nextInt(5);
+            for (int j = 0; j < numHoles; ++j) {
+                w.addDocument(new Document());
+            }
+            Document doc = new Document();
+            doc.add(new StringField("foo", "bar", Store.NO));
+            w.addDocument(doc);
+        }
+        reader = w.getReader();
+        w.close();
+        Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader));
+        searcher = new ContextIndexSearcher(engineSearcher, IndexSearcher.getDefaultQueryCache(), MAYBE_CACHE_POLICY);
+    }
+
+    @AfterClass
+    public static void after() throws IOException {
+        IOUtils.close(reader, dir);
+        dir = null;
+        reader = null;
+        searcher = null;
+    }
+
+    public void testBasic() throws IOException {
+        Profiler profiler = new Profiler();
+        searcher.setProfiler(profiler);
+        Query query = new TermQuery(new Term("foo", "bar"));
+        searcher.search(query, 1);
+        List<ProfileResult> results = profiler.getQueryTree();
+        assertEquals(1, results.size());
+        Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L));
+
+        long rewriteTime = profiler.getRewriteTime();
+        assertThat(rewriteTime, greaterThan(0L));
+    }
+
+    public void testNoScoring() throws IOException {
+        Profiler profiler = new Profiler();
+        searcher.setProfiler(profiler);
+        Query query = new TermQuery(new Term("foo", "bar"));
+        searcher.search(query, 1, Sort.INDEXORDER); // scores are not needed
+        List<ProfileResult> results = profiler.getQueryTree();
+        assertEquals(1, results.size());
+        Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), equalTo(0L));
+
+        long rewriteTime = profiler.getRewriteTime();
+        assertThat(rewriteTime, greaterThan(0L));
+    }
+
+    public void testUseIndexStats() throws IOException {
+        Profiler profiler = new Profiler();
+        searcher.setProfiler(profiler);
+        Query query = new TermQuery(new Term("foo", "bar"));
+        searcher.count(query); // will use index stats
+        List<ProfileResult> results = profiler.getQueryTree();
+        assertEquals(0, results.size());
+
+        long rewriteTime = profiler.getRewriteTime();
+        assertThat(rewriteTime, greaterThan(0L));
+    }
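Each ProfileResult above exposes its per-phase timings as a map keyed by the ProfileBreakdown.TimingType names. A quick sanity check over a result, assuming only the accessors used in these tests:

---------------------------------------------------------------------------
Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
long totalNanos = 0;
for (long nanos : breakdown.values()) {
    totalNanos += nanos; // sums CREATE_WEIGHT, BUILD_SCORER, NEXT_DOC, ADVANCE, SCORE, MATCH
}
---------------------------------------------------------------------------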
+    public void testApproximations() throws IOException {
+        Profiler profiler = new Profiler();
+        Engine.Searcher engineSearcher = new Engine.Searcher("test", new IndexSearcher(reader));
+        // disable query caching since we want to test approximations, which won't
+        // be exposed on a cached entry
+        ContextIndexSearcher searcher = new ContextIndexSearcher(engineSearcher, null, MAYBE_CACHE_POLICY);
+        searcher.setProfiler(profiler);
+        Query query = new RandomApproximationQuery(new TermQuery(new Term("foo", "bar")), random());
+        searcher.count(query);
+        List<ProfileResult> results = profiler.getQueryTree();
+        assertEquals(1, results.size());
+        Map<String, Long> breakdown = results.get(0).getTimeBreakdown();
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.CREATE_WEIGHT.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.BUILD_SCORER.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.NEXT_DOC.toString()).longValue(), greaterThan(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.ADVANCE.toString()).longValue(), equalTo(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.SCORE.toString()).longValue(), equalTo(0L));
+        assertThat(breakdown.get(ProfileBreakdown.TimingType.MATCH.toString()).longValue(), greaterThan(0L));
+
+        long rewriteTime = profiler.getRewriteTime();
+        assertThat(rewriteTime, greaterThan(0L));
+
+    }
+
+    public void testCollector() throws IOException {
+        TotalHitCountCollector collector = new TotalHitCountCollector();
+        ProfileCollector profileCollector = new ProfileCollector(collector);
+        assertEquals(0, profileCollector.getTime());
+        final LeafCollector leafCollector = profileCollector.getLeafCollector(reader.leaves().get(0));
+        assertThat(profileCollector.getTime(), greaterThan(0L));
+        long time = profileCollector.getTime();
+        leafCollector.setScorer(Lucene.illegalScorer("dummy scorer"));
+        assertThat(profileCollector.getTime(), greaterThan(time));
+        time = profileCollector.getTime();
+        leafCollector.collect(0);
+        assertThat(profileCollector.getTime(), greaterThan(time));
+    }
+}
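testCollector pins down the wrapping behavior: time accumulates around each delegated call. A stripped-down, self-contained sketch of that idea in plain Lucene (the real ProfileCollector is more involved; this class is illustrative only):

---------------------------------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;

final class TimingCollector implements Collector {
    private final Collector in;
    private long timeNanos;

    TimingCollector(Collector in) { this.in = in; }

    long getTime() { return timeNanos; }

    @Override
    public boolean needsScores() { return in.needsScores(); }

    @Override
    public LeafCollector getLeafCollector(LeafReaderContext ctx) throws IOException {
        long start = System.nanoTime();
        final LeafCollector leaf;
        try { leaf = in.getLeafCollector(ctx); } finally { timeNanos += System.nanoTime() - start; }
        return new LeafCollector() {
            @Override
            public void setScorer(Scorer scorer) throws IOException {
                long s = System.nanoTime();
                try { leaf.setScorer(scorer); } finally { timeNanos += System.nanoTime() - s; }
            }
            @Override
            public void collect(int doc) throws IOException {
                long s = System.nanoTime();
                try { leaf.collect(doc); } finally { timeNanos += System.nanoTime() - s; }
            }
        };
    }
}
---------------------------------------------------------------------------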
diff --git a/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java
new file mode 100644
index 00000000000..bb33364a751
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/profile/QueryProfilerIT.java
@@ -0,0 +1,596 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.elasticsearch.action.search.*;
+import org.elasticsearch.search.SearchHit;
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.index.query.*;
+import org.elasticsearch.search.sort.SortOrder;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.search.profile.RandomQueryGenerator.randomQueryBuilder;
+import static org.elasticsearch.test.hamcrest.DoubleMatcher.nearlyEqual;
+import static org.hamcrest.Matchers.*;
+
+
+public class QueryProfilerIT extends ESIntegTestCase {
+
+    /**
+     * This test simply checks to make sure nothing crashes. It indexes 100-150 documents,
+     * constructs 20-100 random queries and tries to profile them.
+     */
+    public void testProfileQuery() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        List<String> stringFields = Arrays.asList("field1");
+        List<String> numericFields = Arrays.asList("field2");
+
+        indexRandom(true, docs);
+
+        refresh();
+        int iters = between(20, 100);
+        for (int i = 0; i < iters; i++) {
+            QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
+            logger.info(q.toString());
+
+            SearchResponse resp = client().prepareSearch()
+                    .setQuery(q)
+                    .setProfile(true)
+                    .setSearchType(SearchType.QUERY_THEN_FETCH)
+                    .execute().actionGet();
+
+            assertNotNull("Profile response element should not be null", resp.getProfileResults());
+            for (Map.Entry<String, List<ProfileShardResult>> shard : resp.getProfileResults().entrySet()) {
+                for (ProfileShardResult searchProfiles : shard.getValue()) {
+                    for (ProfileResult result : searchProfiles.getQueryResults()) {
+                        assertNotNull(result.getQueryName());
+                        assertNotNull(result.getLuceneDescription());
+                        assertThat(result.getTime(), greaterThan(0L));
+                    }
+
+                    CollectorResult result = searchProfiles.getCollectorResult();
+                    assertThat(result.getName(), not(isEmptyOrNullString()));
+                    assertThat(result.getTime(), greaterThan(0L));
+                }
+            }
+
+        }
+    }
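The next test compares a profiled and an unprofiled run of the same query and checks scores with DoubleMatcher.nearlyEqual. That matcher's implementation is not shown in this patch, so the following is an assumed relative-epsilon comparison in the same spirit, based on the classic floating-point comparison recipe:

---------------------------------------------------------------------------
// assumed semantics of DoubleMatcher.nearlyEqual; the real helper may differ
static boolean nearlyEqual(double a, double b, double epsilon) {
    final double absA = Math.abs(a);
    final double absB = Math.abs(b);
    final double diff = Math.abs(a - b);
    if (a == b) {
        return true; // handles infinities and exact matches
    } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) {
        // near zero, relative error is meaningless; fall back to absolute error
        return diff < epsilon * Double.MIN_NORMAL;
    } else {
        return diff / Math.min(absA + absB, Double.MAX_VALUE) < epsilon;
    }
}
---------------------------------------------------------------------------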
+    /**
+     * This test generates 1-10 random queries and executes a profiled and non-profiled
+     * search for each query. It then does some basic sanity checking of score and hits
+     * to make sure the profiling doesn't interfere with the hits being returned
+     */
+    public void testProfileMatchesRegular() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        List<String> stringFields = Arrays.asList("field1");
+        List<String> numericFields = Arrays.asList("field2");
+
+        indexRandom(true, docs);
+
+        refresh();
+        int iters = between(1, 10);
+        for (int i = 0; i < iters; i++) {
+            QueryBuilder q = randomQueryBuilder(stringFields, numericFields, numDocs, 3);
+            logger.info(q.toString());
+
+            SearchRequestBuilder vanilla = client().prepareSearch("test")
+                    .setQuery(q)
+                    .setProfile(false)
+                    .addSort("_score", SortOrder.DESC)
+                    .addSort("_uid", SortOrder.ASC)
+                    .setPreference("_primary")
+                    .setSearchType(SearchType.QUERY_THEN_FETCH);
+
+            SearchRequestBuilder profile = client().prepareSearch("test")
+                    .setQuery(q)
+                    .setProfile(true)
+                    .addSort("_score", SortOrder.DESC)
+                    .addSort("_uid", SortOrder.ASC)
+                    .setPreference("_primary")
+                    .setSearchType(SearchType.QUERY_THEN_FETCH);
+
+            MultiSearchResponse.Item[] responses = client().prepareMultiSearch()
+                    .add(vanilla)
+                    .add(profile)
+                    .execute().actionGet().getResponses();
+
+            SearchResponse vanillaResponse = responses[0].getResponse();
+            SearchResponse profileResponse = responses[1].getResponse();
+
+            float vanillaMaxScore = vanillaResponse.getHits().getMaxScore();
+            float profileMaxScore = profileResponse.getHits().getMaxScore();
+            if (Float.isNaN(vanillaMaxScore)) {
+                assertTrue("Vanilla maxScore is NaN but Profile is not [" + profileMaxScore + "]",
+                        Float.isNaN(profileMaxScore));
+            } else {
+                assertTrue("Profile maxScore of [" + profileMaxScore + "] is not close to Vanilla maxScore [" + vanillaMaxScore + "]",
+                        nearlyEqual(vanillaMaxScore, profileMaxScore, 0.001));
+            }
+
+            assertThat("Profile totalHits of [" + profileResponse.getHits().totalHits() + "] is not close to Vanilla totalHits [" + vanillaResponse.getHits().totalHits() + "]",
+                    vanillaResponse.getHits().getTotalHits(), equalTo(profileResponse.getHits().getTotalHits()));
+
+            SearchHit[] vanillaHits = vanillaResponse.getHits().getHits();
+            SearchHit[] profileHits = profileResponse.getHits().getHits();
+
+            for (int j = 0; j < vanillaHits.length; j++) {
+                assertThat("Profile hit #" + j + " has a different ID from Vanilla",
+                        vanillaHits[j].getId(), equalTo(profileHits[j].getId()));
+            }
+        }
+    }
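The maxScore comparison above has to tolerate both the no-hits case (maxScore is NaN) and tiny floating-point drift, which it delegates to DoubleMatcher.nearlyEqual, added later in this change. Distilled into one illustrative helper (the helper name is ours, not the PR's):

    // Illustrative only: profiled and vanilla max scores "agree" when both are
    // NaN (no scored hits) or when they sit within a small relative epsilon.
    private static boolean maxScoresAgree(float vanilla, float profiled) {
        if (Float.isNaN(vanilla)) {
            return Float.isNaN(profiled);
        }
        return DoubleMatcher.nearlyEqual(vanilla, profiled, 0.001);
    }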
+    /**
+     * This test verifies that the output is reasonable for a simple, non-nested query
+     */
+    public void testSimpleMatch() throws Exception {
+        createIndex("test");
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+        ensureGreen();
+
+        QueryBuilder q = QueryBuilders.matchQuery("field1", "one");
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
+        assertNotNull(p);
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertEquals(result.getQueryName(), "TermQuery");
+                    assertEquals(result.getLuceneDescription(), "field1:one");
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+
+    /**
+     * This test verifies that the output is reasonable for a nested query
+     */
+    public void testBool() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one")).must(QueryBuilders.matchQuery("field1", "two"));
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        Map<String, List<ProfileShardResult>> p = resp.getProfileResults();
+        assertNotNull(p);
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertEquals(result.getQueryName(), "BooleanQuery");
+                    assertEquals(result.getLuceneDescription(), "+field1:one +field1:two");
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                    assertEquals(result.getProfiledChildren().size(), 2);
+
+                    // Check the children
+                    List<ProfileResult> children = result.getProfiledChildren();
+                    assertEquals(children.size(), 2);
+
+                    ProfileResult childProfile = children.get(0);
+                    assertEquals(childProfile.getQueryName(), "TermQuery");
+                    assertEquals(childProfile.getLuceneDescription(), "field1:one");
+                    assertThat(childProfile.getTime(), greaterThan(0L));
+                    assertNotNull(childProfile.getTimeBreakdown());
+                    assertEquals(childProfile.getProfiledChildren().size(), 0);
+
+                    childProfile = children.get(1);
+                    assertEquals(childProfile.getQueryName(), "TermQuery");
+                    assertEquals(childProfile.getLuceneDescription(), "field1:two");
+                    assertThat(childProfile.getTime(), greaterThan(0L));
+                    assertNotNull(childProfile.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
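These tests only assert that getTimeBreakdown() is non-null. Assuming the breakdown is a map from timing category to elapsed time for one profiled query node (a plausible reading of how it is used here, not something these tests verify), inspecting it might look like the following hypothetical helper:

    // Assumption (not asserted by these tests): the breakdown maps timing
    // categories to elapsed time for a single profiled query node.
    private static void logBreakdown(ESLogger logger, ProfileResult result) {
        for (Map.Entry<String, Long> op : result.getTimeBreakdown().entrySet()) {
            logger.info("{}: {} -> {}", result.getQueryName(), op.getKey(), op.getValue());
        }
    }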
+    /**
+     * Tests a boolean query with no child clauses
+     */
+    public void testEmptyBool() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.boolQuery();
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+
+    /**
+     * Tests a series of three nested boolean queries with a single "leaf" match query.
+     * The rewrite process will "collapse" this down to a single bool, so this tests to make sure
+     * nothing catastrophic happens during that fairly substantial rewrite
+     */
+    public void testCollapsingBool() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.boolQuery().must(QueryBuilders.matchQuery("field1", "one"))));
+
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+    public void testBoosting() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.boostingQuery(QueryBuilders.matchQuery("field1", "one"), QueryBuilders.matchQuery("field1", "two"))
+                .boost(randomFloat())
+                .negativeBoost(randomFloat());
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+
+    public void testDisMaxRange() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.disMaxQuery()
+                .boost(0.33703882f)
+                .add(QueryBuilders.rangeQuery("field2").from(null).to(73).includeLower(true).includeUpper(true));
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+    public void testRange() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
+
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+
+    public void testPhrase() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i) + " " + English.intToEnglish(i+1),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+
+        refresh();
+
+        QueryBuilder q = QueryBuilders.matchPhraseQuery("field1", "one two");
+
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch()
+                .setQuery(q)
+                .setIndices("test")
+                .setTypes("type1")
+                .setProfile(true)
+                .setSearchType(SearchType.QUERY_THEN_FETCH)
+                .execute().actionGet();
+
+        if (resp.getShardFailures().length > 0) {
+            for (ShardSearchFailure f : resp.getShardFailures()) {
+                logger.error(f.toString());
+            }
+            fail();
+        }
+
+        assertNotNull("Profile response element should not be null", resp.getProfileResults());
+
+        for (Map.Entry<String, List<ProfileShardResult>> shardResult : resp.getProfileResults().entrySet()) {
+            for (ProfileShardResult searchProfiles : shardResult.getValue()) {
+                for (ProfileResult result : searchProfiles.getQueryResults()) {
+                    assertNotNull(result.getQueryName());
+                    assertNotNull(result.getLuceneDescription());
+                    assertThat(result.getTime(), greaterThan(0L));
+                    assertNotNull(result.getTimeBreakdown());
+                }
+
+                CollectorResult result = searchProfiles.getCollectorResult();
+                assertThat(result.getName(), not(isEmptyOrNullString()));
+                assertThat(result.getTime(), greaterThan(0L));
+            }
+        }
+    }
+
+    /**
+     * This test makes sure no profile results are returned when profiling is disabled
+     */
+    public void testNoProfile() throws Exception {
+        createIndex("test");
+        ensureGreen();
+
+        int numDocs = randomIntBetween(100, 150);
+        IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+        for (int i = 0; i < numDocs; i++) {
+            docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource(
+                    "field1", English.intToEnglish(i),
+                    "field2", i
+            );
+        }
+
+        indexRandom(true, docs);
+        refresh();
+        QueryBuilder q = QueryBuilders.rangeQuery("field2").from(0).to(5);
+
+        logger.info(q.toString());
+
+        SearchResponse resp = client().prepareSearch().setQuery(q).setProfile(false).execute().actionGet();
+        assertThat("Profile response element should be an empty map", resp.getProfileResults().size(), equalTo(0));
+    }
+
+}
+
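For reference, the response shape every test above consumes: getProfileResults() yields one entry per shard, each holding the list of ProfileShardResult whose query results and collector result are asserted on. A compact consumption sketch, with logging standing in for the assertions; treating the map key as an opaque shard identifier is an assumption based on how these tests iterate it:

    Map<String, List<ProfileShardResult>> profiles = resp.getProfileResults();
    for (Map.Entry<String, List<ProfileShardResult>> shard : profiles.entrySet()) {
        for (ProfileShardResult shardResult : shard.getValue()) {
            // one ProfileResult tree per profiled query on this shard
            for (ProfileResult query : shardResult.getQueryResults()) {
                logger.info("[{}] {} : {}", shard.getKey(), query.getQueryName(), query.getTime());
            }
            logger.info("collector [{}]", shardResult.getCollectorResult().getName());
        }
    }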
diff --git a/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java
new file mode 100644
index 00000000000..fb8cd40ce52
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/search/profile/RandomQueryGenerator.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.profile;
+
+import org.apache.lucene.util.English;
+import org.elasticsearch.common.unit.Fuzziness;
+import org.elasticsearch.index.query.*;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.carrotsearch.randomizedtesting.RandomizedTest.*;
+import static org.junit.Assert.assertTrue;
+
+
+public class RandomQueryGenerator {
+    public static QueryBuilder randomQueryBuilder(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
+        assertTrue("Must supply at least one string field", stringFields.size() > 0);
+        assertTrue("Must supply at least one numeric field", numericFields.size() > 0);
+
+        // If depth is exhausted, or 50% of the time, return a terminal query.
+        // Helps limit ridiculously large compound queries
+        if (depth == 0 || randomBoolean()) {
+            return randomTerminalQuery(stringFields, numericFields, numDocs);
+        }
+
+        switch (randomIntBetween(0,5)) {
+            case 0:
+                return randomTerminalQuery(stringFields, numericFields, numDocs);
+            case 1:
+                return QueryBuilders.boolQuery().must(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1))
+                        .filter(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+            case 2:
+                return randomBoolQuery(stringFields, numericFields, numDocs, depth);
+            case 3:
+                // disabled for now because of https://issues.apache.org/jira/browse/LUCENE-6781
+                //return randomBoostingQuery(stringFields, numericFields, numDocs, depth);
+            case 4:
+                return randomConstantScoreQuery(stringFields, numericFields, numDocs, depth);
+            case 5:
+                return randomDisMaxQuery(stringFields, numericFields, numDocs, depth);
+            default:
+                return randomTerminalQuery(stringFields, numericFields, numDocs);
+        }
+    }
+
+    private static QueryBuilder randomTerminalQuery(List<String> stringFields, List<String> numericFields, int numDocs) {
+        switch (randomIntBetween(0,6)) {
+            case 0:
+                return randomTermQuery(stringFields, numDocs);
+            case 1:
+                return randomTermsQuery(stringFields, numDocs);
+            case 2:
+                return randomRangeQuery(numericFields, numDocs);
+            case 3:
+                return QueryBuilders.matchAllQuery();
+            case 4:
+                return randomCommonTermsQuery(stringFields, numDocs);
+            case 5:
+                return randomFuzzyQuery(stringFields);
+            case 6:
+                return randomIDsQuery();
+            default:
+                return randomTermQuery(stringFields, numDocs);
+        }
+    }
+
+    private static String randomQueryString(int max) {
+        StringBuilder qsBuilder = new StringBuilder();
+
+        for (int i = 0; i < max; i++) {
+            qsBuilder.append(English.intToEnglish(randomInt(max)));
+            qsBuilder.append(" ");
+        }
+
+        return qsBuilder.toString().trim();
+    }
+
+    private static String randomField(List<String> fields) {
+        return fields.get(randomInt(fields.size() - 1));
+    }
+
+    private static QueryBuilder randomTermQuery(List<String> fields, int numDocs) {
+        return QueryBuilders.termQuery(randomField(fields), randomQueryString(1));
+    }
+
+    private static QueryBuilder randomTermsQuery(List<String> fields, int numDocs) {
+        int numTerms = randomInt(numDocs);
+        ArrayList<String> terms = new ArrayList<>(numTerms);
+
+        for (int i = 0; i < numTerms; i++) {
+            terms.add(randomQueryString(1));
+        }
+
+        return QueryBuilders.termsQuery(randomField(fields), terms);
+    }
+
+    private static QueryBuilder randomRangeQuery(List<String> fields, int numDocs) {
+        QueryBuilder q = QueryBuilders.rangeQuery(randomField(fields));
+
+        if (randomBoolean()) {
+            ((RangeQueryBuilder)q).from(randomIntBetween(0, numDocs / 2 - 1));
+        }
+        if (randomBoolean()) {
+            ((RangeQueryBuilder)q).to(randomIntBetween(numDocs / 2, numDocs));
+        }
+
+        return q;
+    }
+    private static QueryBuilder randomBoolQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
+        QueryBuilder q = QueryBuilders.boolQuery();
+        int numClause = randomIntBetween(0,5);
+        for (int i = 0; i < numClause; i++) {
+            ((BoolQueryBuilder)q).must(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+        }
+
+        numClause = randomIntBetween(0,5);
+        for (int i = 0; i < numClause; i++) {
+            ((BoolQueryBuilder)q).should(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+        }
+
+        numClause = randomIntBetween(0,5);
+        for (int i = 0; i < numClause; i++) {
+            ((BoolQueryBuilder)q).mustNot(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+        }
+
+        return q;
+    }
+
+    private static QueryBuilder randomBoostingQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
+        return QueryBuilders.boostingQuery(
+                randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1),
+                randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1))
+                .boost(randomFloat())
+                .negativeBoost(randomFloat());
+    }
+
+    private static QueryBuilder randomConstantScoreQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
+        return QueryBuilders.constantScoreQuery(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+    }
+
+    private static QueryBuilder randomCommonTermsQuery(List<String> fields, int numDocs) {
+        int numTerms = randomInt(numDocs);
+
+        QueryBuilder q = QueryBuilders.commonTermsQuery(randomField(fields), randomQueryString(numTerms));
+        if (randomBoolean()) {
+            ((CommonTermsQueryBuilder)q).boost(randomFloat());
+        }
+
+        if (randomBoolean()) {
+            ((CommonTermsQueryBuilder)q).cutoffFrequency(randomFloat());
+        }
+
+        if (randomBoolean()) {
+            ((CommonTermsQueryBuilder)q).highFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms)))
+                    .highFreqOperator(randomBoolean() ? Operator.AND : Operator.OR);
+        }
+
+        if (randomBoolean()) {
+            ((CommonTermsQueryBuilder)q).lowFreqMinimumShouldMatch(Integer.toString(randomInt(numTerms)))
+                    .lowFreqOperator(randomBoolean() ? Operator.AND : Operator.OR);
+        }
+
+        return q;
+    }
+    private static QueryBuilder randomFuzzyQuery(List<String> fields) {
+
+        QueryBuilder q = QueryBuilders.fuzzyQuery(randomField(fields), randomQueryString(1));
+
+        if (randomBoolean()) {
+            ((FuzzyQueryBuilder)q).boost(randomFloat());
+        }
+
+        if (randomBoolean()) {
+            switch (randomIntBetween(0, 4)) {
+                case 0:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO);
+                    break;
+                case 1:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ONE);
+                    break;
+                case 2:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.TWO);
+                    break;
+                case 3:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.ZERO);
+                    break;
+                case 4:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.fromEdits(randomIntBetween(0,2)));
+                    break;
+                default:
+                    ((FuzzyQueryBuilder)q).fuzziness(Fuzziness.AUTO);
+                    break;
+            }
+        }
+
+        if (randomBoolean()) {
+            ((FuzzyQueryBuilder)q).maxExpansions(Math.abs(randomInt()));
+        }
+
+        if (randomBoolean()) {
+            ((FuzzyQueryBuilder)q).prefixLength(Math.abs(randomInt()));
+        }
+
+        if (randomBoolean()) {
+            ((FuzzyQueryBuilder)q).transpositions(randomBoolean());
+        }
+
+        return q;
+    }
+
+    private static QueryBuilder randomDisMaxQuery(List<String> stringFields, List<String> numericFields, int numDocs, int depth) {
+        QueryBuilder q = QueryBuilders.disMaxQuery();
+
+        int numClauses = randomIntBetween(1, 10);
+        for (int i = 0; i < numClauses; i++) {
+            ((DisMaxQueryBuilder)q).add(randomQueryBuilder(stringFields, numericFields, numDocs, depth - 1));
+        }
+
+        if (randomBoolean()) {
+            ((DisMaxQueryBuilder)q).boost(randomFloat());
+        }
+
+        if (randomBoolean()) {
+            ((DisMaxQueryBuilder)q).tieBreaker(randomFloat());
+        }
+
+        return q;
+    }
+
+    private static QueryBuilder randomIDsQuery() {
+        QueryBuilder q = QueryBuilders.idsQuery();
+
+        int numIDs = randomInt(100);
+        for (int i = 0; i < numIDs; i++) {
+            ((IdsQueryBuilder)q).addIds(String.valueOf(randomInt()));
+        }
+
+        if (randomBoolean()) {
+            ((IdsQueryBuilder)q).boost(randomFloat());
+        }
+
+        return q;
+    }
+}
\ No newline at end of file
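The profiler tests above drive this generator with one string field, one numeric field, and a recursion depth of 3. A usage sketch matching those call sites (numDocs stands for the document count the calling test indexed):

    List<String> stringFields = Arrays.asList("field1");
    List<String> numericFields = Arrays.asList("field2");
    // depth 3 caps the nesting; roughly half of all nodes terminate early regardless
    QueryBuilder q = RandomQueryGenerator.randomQueryBuilder(stringFields, numericFields, numDocs, 3);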
diff --git a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java
similarity index 69%
rename from core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java
rename to core/src/test/java/org/elasticsearch/search/query/ExistsIT.java
index 349197d5f48..73906b2ed83 100644
--- a/core/src/test/java/org/elasticsearch/search/query/ExistsMissingIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/ExistsIT.java
@@ -43,7 +43,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
 
-public class ExistsMissingIT extends ESIntegTestCase {
+public class ExistsIT extends ESIntegTestCase {
 
     // TODO: move this to a unit test somewhere...
     public void testEmptyIndex() throws Exception {
@@ -51,11 +51,11 @@ public class ExistsMissingIT extends ESIntegTestCase {
         ensureYellow("test");
         SearchResponse resp = client().prepareSearch("test").setQuery(QueryBuilders.existsQuery("foo")).execute().actionGet();
         assertSearchResponse(resp);
-        resp = client().prepareSearch("test").setQuery(QueryBuilders.missingQuery("foo")).execute().actionGet();
+        resp = client().prepareSearch("test").setQuery(QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("foo"))).execute().actionGet();
         assertSearchResponse(resp);
     }
 
-    public void testExistsMissing() throws Exception {
+    public void testExists() throws Exception {
         XContentBuilder mapping = XContentBuilder.builder(JsonXContent.jsonXContent)
                 .startObject()
                     .startObject("type")
@@ -145,62 +145,6 @@ public class ExistsMissingIT extends ESIntegTestCase {
                 }
                 throw e;
             }
-
-            // missing
-            resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery(fieldName)).execute().actionGet();
-            assertSearchResponse(resp);
-            assertEquals(String.format(Locale.ROOT, "missing(%s, %d) mapping: %s response: %s", fieldName, count, mapping.string(), resp), numDocs - count, resp.getHits().totalHits());
         }
     }
-
-    public void testNullValueUnset() throws Exception {
-        assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed"));
-        indexRandom(true,
-                client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
-                client().prepareIndex("idx", "type", "2").setSource("f", null),
-                client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
-                client().prepareIndex("idx", "type", "4").setSource("f", "bar"));
-
-        SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get();
-        assertSearchHits(resp, "2", "3");
-
-        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get();
-        assertSearchHits(resp, "2", "3");
-
-        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get();
-        assertSearchHits(resp);
-
-        try {
-            client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get();
-            fail("both existence and null_value can't be false");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-    }
-
-    public void testNullValueSet() throws Exception {
-        assertAcked(client().admin().indices().prepareCreate("idx").addMapping("type", "f", "type=string,index=not_analyzed,null_value=bar"));
-        indexRandom(true,
-                client().prepareIndex("idx", "type", "1").setSource("f", "foo"),
-                client().prepareIndex("idx", "type", "2").setSource("f", null),
-                client().prepareIndex("idx", "type", "3").setSource("g", "bar"),
-                client().prepareIndex("idx", "type", "4").setSource("f", "bar"));
-
-        SearchResponse resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, true)).get();
-        assertSearchHits(resp, "2", "3", "4");
-
-        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, true)).get();
-        assertSearchHits(resp, "3");
-
-        resp = client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", true, false)).get();
-        assertSearchHits(resp, "2", "4");
-
-        try {
-            client().prepareSearch("idx").setQuery(QueryBuilders.missingQuery("f", false, false)).get();
-            fail("both existence and null_value can't be false");
-        } catch (IllegalArgumentException e) {
-            // expected
-        }
-    }
-
 }
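The deleted blocks track the removal of the missing query; the replacement pattern, already used in testEmptyIndex above, wraps an exists query in a bool must_not clause. The migration in miniature:

    // before (missing query, now removed):
    //   QueryBuilders.missingQuery("field1")
    // after:
    QueryBuilder missing = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("field1"));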
diff --git a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
index 61a237c1a93..9918d449657 100644
--- a/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/core/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -70,7 +70,6 @@ import static org.elasticsearch.index.query.QueryBuilders.idsQuery;
 import static org.elasticsearch.index.query.QueryBuilders.indicesQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
-import static org.elasticsearch.index.query.QueryBuilders.missingQuery;
 import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery;
 import static org.elasticsearch.index.query.QueryBuilders.prefixQuery;
 import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
@@ -805,32 +804,6 @@ public class SearchQueryIT extends ESIntegTestCase {
         searchResponse = client().prepareSearch().setQuery(existsQuery("obj1")).get();
         assertHitCount(searchResponse, 2l);
         assertSearchHits(searchResponse, "1", "2");
-
-        searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
-
-        searchResponse = client().prepareSearch().setQuery(missingQuery("field1")).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
-
-        searchResponse = client().prepareSearch().setQuery(constantScoreQuery(missingQuery("field1"))).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
-
-        searchResponse = client().prepareSearch().setQuery(queryStringQuery("_missing_:field1")).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
-
-        // wildcard check
-        searchResponse = client().prepareSearch().setQuery(missingQuery("x*")).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
-
-        // object check
-        searchResponse = client().prepareSearch().setQuery(missingQuery("obj1")).get();
-        assertHitCount(searchResponse, 2l);
-        assertSearchHits(searchResponse, "3", "4");
     }
 
     public void testPassQueryOrFilterAsJSONString() throws Exception {
diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
index 6f00e9977cf..0f5ac1a522f 100644
--- a/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
+++ b/core/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java
@@ -942,13 +942,13 @@ public class CompletionSuggestSearchIT extends ESIntegTestCase {
     }
 
     public void assertSuggestions(String suggestion, String... suggestions) {
-        String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10);
+        String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10);
         CompletionSuggestionBuilder suggestionBuilder = SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestion).size(10);
         assertSuggestions(suggestionName, suggestionBuilder, suggestions);
     }
 
     public void assertSuggestionsNotInOrder(String suggestString, String...
suggestions) { - String suggestionName = RandomStrings.randomAsciiOfLength(new Random(), 10); + String suggestionName = RandomStrings.randomAsciiOfLength(random(), 10); SuggestResponse suggestResponse = client().prepareSuggest(INDEX).addSuggestion( SuggestBuilders.completionSuggestion(suggestionName).field(FIELD).text(suggestString).size(10) ).execute().actionGet(); diff --git a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java index 281cf6ae18e..419316b5265 100644 --- a/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java +++ b/core/src/test/java/org/elasticsearch/search/suggest/CustomSuggester.java @@ -20,7 +20,7 @@ package org.elasticsearch.search.suggest; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import java.io.IOException; import java.util.Locale; @@ -42,11 +42,11 @@ public class CustomSuggester extends Suggester> response = new Suggest.Suggestion<>(name, suggestion.getSize()); String firstSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "12"); - Suggest.Suggestion.Entry resultEntry12 = new Suggest.Suggestion.Entry<>(new StringText(firstSuggestion), 0, text.length() + 2); + Suggest.Suggestion.Entry resultEntry12 = new Suggest.Suggestion.Entry<>(new Text(firstSuggestion), 0, text.length() + 2); response.addTerm(resultEntry12); String secondSuggestion = String.format(Locale.ROOT, "%s-%s-%s-%s", text, suggestion.getField(), suggestion.options.get("suffix"), "123"); - Suggest.Suggestion.Entry resultEntry123 = new Suggest.Suggestion.Entry<>(new StringText(secondSuggestion), 0, text.length() + 3); + Suggest.Suggestion.Entry resultEntry123 = new Suggest.Suggestion.Entry<>(new Text(secondSuggestion), 0, text.length() + 3); response.addTerm(resultEntry123); return response; diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 51ae038ca0d..8fde9bbf330 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -56,7 +56,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) // Rebalancing is causing some checks after restore to randomly fail // due to https://github.com/elastic/elasticsearch/issues/9421 - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); } diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index f9392836d8b..7946116f571 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -22,7 +22,6 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; -import org.elasticsearch.ElasticsearchParseException; 
import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -33,24 +32,17 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.index.store.IndexStore; @@ -68,9 +60,9 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.test.TestCustomMetaData; import org.elasticsearch.test.rest.FakeRestRequest; -import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; @@ -123,14 +115,14 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> set test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 2) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, random, TimeUnit.MINUTES)) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 2) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), random, TimeUnit.MINUTES)) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), equalTo(2)); + 
.getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), equalTo(2)); logger.info("--> create repository"); PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") @@ -146,23 +138,26 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest logger.info("--> clean the test persistent setting"); client.admin().cluster().prepareUpdateSettings().setPersistentSettings( Settings.settingsBuilder() - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, 1) - .put(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1))) + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1) + .put(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1))) .execute().actionGet(); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); + .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(1).millis())); stopNode(secondNode); assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("1").get().isTimedOut(), equalTo(false)); logger.info("--> restore snapshot"); - client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); - assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsTime(IndicesTTLService.INDICES_TTL_INTERVAL, TimeValue.timeValueMinutes(1)).millis(), equalTo(TimeValue.timeValueMinutes(random).millis())); - + try { + client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true).execute().actionGet(); + fail("can't restore minimum master nodes"); + } catch (IllegalArgumentException ex) { + assertEquals("illegal value can't update [discovery.zen.minimum_master_nodes] from [1] to [2]", ex.getMessage()); + assertEquals("cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [1]", ex.getCause().getMessage()); + } logger.info("--> ensure that zen discovery minimum master nodes wasn't restored"); assertThat(client.admin().cluster().prepareState().setRoutingTable(false).setNodes(false).execute().actionGet().getState() - .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, -1), not(equalTo(2))); + .getMetaData().persistentSettings().getAsInt(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), -1), not(equalTo(2))); } public void testRestoreCustomMetadata() throws Exception { @@ -554,7 +549,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception { logger.info("--> start 2 nodes"); Settings nodeSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE, EnableAllocationDecider.Rebalance.NONE) + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE) .build(); 
internalCluster().startNode(nodeSettings); @@ -899,78 +894,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest )); } - public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { - private final String data; - - protected TestCustomMetaData(String data) { - this.data = data; - } - - public String getData() { - return data; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - TestCustomMetaData that = (TestCustomMetaData) o; - - if (!data.equals(that.data)) return false; - - return true; - } - - @Override - public int hashCode() { - return data.hashCode(); - } - - protected abstract TestCustomMetaData newTestCustomMetaData(String data); - - @Override - public Custom readFrom(StreamInput in) throws IOException { - return newTestCustomMetaData(in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(getData()); - } - - @Override - public Custom fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [{}]", currentFieldName); - } - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); - } - } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return newTestCustomMetaData(data); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", getData()); - return builder; - } - } - static { MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); diff --git a/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java new file mode 100644 index 00000000000..de275eaffca --- /dev/null +++ b/core/src/test/java/org/elasticsearch/test/hamcrest/DoubleMatcher.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.test.hamcrest;
+
+
+public class DoubleMatcher {
+
+    /**
+     * Better floating point comparisons courtesy of https://github.com/brazzy/floating-point-gui.de
+     *
+     * Snippet adapted to use doubles instead of floats
+     */
+    public static boolean nearlyEqual(double a, double b, double epsilon) {
+        final double absA = Math.abs(a);
+        final double absB = Math.abs(b);
+        final double diff = Math.abs(a - b);
+
+        if (a == b) { // shortcut, handles infinities
+            return true;
+        } else if (a == 0 || b == 0 || diff < Double.MIN_NORMAL) {
+            // a or b is zero, or both are extremely close to it;
+            // relative error is less meaningful here
+            return diff < (epsilon * Double.MIN_NORMAL);
+        } else { // use relative error
+            return diff / Math.min((absA + absB), Double.MAX_VALUE) < epsilon;
+        }
+    }
+}
diff --git a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java b/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java
deleted file mode 100644
index 3dfca5cb283..00000000000
--- a/core/src/test/java/org/elasticsearch/threadpool/ThreadPoolTypeSettingsValidatorTests.java
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */ - -package org.elasticsearch.threadpool; - -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.settings.Validator; -import org.elasticsearch.test.ESTestCase; -import org.junit.Before; - -import java.util.*; - -import static org.junit.Assert.*; - -public class ThreadPoolTypeSettingsValidatorTests extends ESTestCase { - private Validator validator; - - @Before - public void setUp() throws Exception { - super.setUp(); - validator = ThreadPool.THREAD_POOL_TYPE_SETTINGS_VALIDATOR; - } - - public void testValidThreadPoolTypeSettings() { - for (Map.Entry entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - assertNull(validateSetting(validator, entry.getKey(), entry.getValue().getType())); - } - } - - public void testInvalidThreadPoolTypeSettings() { - for (Map.Entry entry : ThreadPool.THREAD_POOL_TYPES.entrySet()) { - Set set = new HashSet<>(); - set.addAll(Arrays.asList(ThreadPool.ThreadPoolType.values())); - set.remove(entry.getValue()); - ThreadPool.ThreadPoolType invalidThreadPoolType = randomFrom(set.toArray(new ThreadPool.ThreadPoolType[set.size()])); - String expectedMessage = String.format( - Locale.ROOT, - "thread pool type for [%s] can only be updated to [%s] but was [%s]", - entry.getKey(), - entry.getValue().getType(), - invalidThreadPoolType.getType()); - String message = validateSetting(validator, entry.getKey(), invalidThreadPoolType.getType()); - assertNotNull(message); - assertEquals(expectedMessage, message); - } - } - - public void testNonThreadPoolTypeSetting() { - String setting = ThreadPool.THREADPOOL_GROUP + randomAsciiOfLength(10) + "foo"; - String value = randomAsciiOfLength(10); - assertNull(validator.validate(setting, value, ClusterState.PROTO)); - } - - private String validateSetting(Validator validator, String threadPoolName, String value) { - return validator.validate(ThreadPool.THREADPOOL_GROUP + threadPoolName + ".type", value, ClusterState.PROTO); - } -} diff --git a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java index 95ceea1e490..56b2a03bad1 100644 --- a/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java +++ b/core/src/test/java/org/elasticsearch/threadpool/UpdateThreadPoolSettingsTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.threadpool; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.test.ESTestCase; @@ -90,17 +91,19 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { threadPool = new ThreadPool(settingsBuilder().put("name", "testUpdateSettingsCanNotChangeThreadPoolType").build()); + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); - - threadPool.updateSettings( + clusterSettings.applySettings( settingsBuilder() .put("threadpool." + threadPoolName + ".type", invalidThreadPoolType.getType()) .build() ); fail("expected IllegalArgumentException"); } catch (IllegalArgumentException e) { + assertEquals("illegal value can't update [threadpool.] from [{}] to [{" + threadPoolName + ".type=" + invalidThreadPoolType.getType() + "}]", e.getMessage()); assertThat( - e.getMessage(), + e.getCause().getMessage(), is("setting threadpool." 
+ threadPoolName + ".type to " + invalidThreadPoolType.getType() + " is not permitted; must be " + validThreadPoolType.getType())); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -111,14 +114,16 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.CACHED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool( - Settings.settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.CACHED); @@ -134,7 +139,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change keep alive Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value changed assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -143,7 +148,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Set the same keep alive - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".keep_alive", "1m").build()); // Make sure keep alive value didn't change assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(1L)); assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(1L)); @@ -160,11 +165,13 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() - .put("name", "testCachedExecutorType").build()); + Settings nodeSettings = Settings.settingsBuilder() + .put("name", "testFixedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertThat(threadPool.executor(threadPoolName), instanceOf(EsThreadPoolExecutor.class)); - - threadPool.updateSettings(settingsBuilder() + Settings settings = clusterSettings.applySettings(settingsBuilder() .put("threadpool." 
+ threadPoolName + ".size", "15") .build()); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); @@ -177,7 +184,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(((EsThreadPoolExecutor) threadPool.executor(threadPoolName)).getKeepAliveTime(TimeUnit.MINUTES), equalTo(0L)); // Put old type back - threadPool.updateSettings(Settings.EMPTY); + settings = clusterSettings.applySettings(Settings.EMPTY); assertEquals(info(threadPool, threadPoolName).getThreadPoolType(), ThreadPool.ThreadPoolType.FIXED); // Make sure keep alive value is not used assertThat(info(threadPool, threadPoolName).getKeepAlive(), nullValue()); @@ -190,7 +197,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change size Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".size", "10").build()); + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".size", "10").build()); // Make sure size values changed assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(10)); @@ -201,8 +208,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { assertThat(threadPool.executor(threadPoolName), sameInstance(oldExecutor)); // Change queue capacity - threadPool.updateSettings(settingsBuilder() - .put("threadpool." + threadPoolName + ".queue", "500") + settings = clusterSettings.applySettings(settingsBuilder().put(settings).put("threadpool." + threadPoolName + ".queue", "500") .build()); } finally { terminateThreadPoolIfNeeded(threadPool); @@ -213,9 +219,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.SCALING); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(settingsBuilder() + Settings nodeSettings = settingsBuilder() .put("threadpool." + threadPoolName + ".size", 10) - .put("name", "testCachedExecutorType").build()); + .put("name", "testScalingExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertThat(info(threadPool, threadPoolName).getMin(), equalTo(1)); assertThat(info(threadPool, threadPoolName).getMax(), equalTo(10)); assertThat(info(threadPool, threadPoolName).getKeepAlive().minutes(), equalTo(5L)); @@ -224,7 +233,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { // Change settings that doesn't require pool replacement Executor oldExecutor = threadPool.executor(threadPoolName); - threadPool.updateSettings(settingsBuilder() + clusterSettings.applySettings(settingsBuilder() .put("threadpool." + threadPoolName + ".keep_alive", "10m") .put("threadpool." + threadPoolName + ".min", "2") .put("threadpool." + threadPoolName + ".size", "15") @@ -248,9 +257,12 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { String threadPoolName = randomThreadPool(ThreadPool.ThreadPoolType.FIXED); ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool." 
+ threadPoolName + ".queue_size", 1000) - .put("name", "testCachedExecutorType").build()); + .put("name", "testCachedExecutorType").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); assertEquals(info(threadPool, threadPoolName).getQueueSize().getSingles(), 1000L); final CountDownLatch latch = new CountDownLatch(1); @@ -264,7 +276,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { } } ); - threadPool.updateSettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); + clusterSettings.applySettings(settingsBuilder().put("threadpool." + threadPoolName + ".queue_size", 2000).build()); assertThat(threadPool.executor(threadPoolName), not(sameInstance(oldExecutor))); assertThat(oldExecutor.isShutdown(), equalTo(true)); assertThat(oldExecutor.isTerminating(), equalTo(true)); @@ -279,12 +291,15 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { public void testCustomThreadPool() throws Exception { ThreadPool threadPool = null; try { - threadPool = new ThreadPool(Settings.settingsBuilder() + Settings nodeSettings = Settings.settingsBuilder() .put("threadpool.my_pool1.type", "scaling") .put("threadpool.my_pool2.type", "fixed") .put("threadpool.my_pool2.size", "1") .put("threadpool.my_pool2.queue_size", "1") - .put("name", "testCustomThreadPool").build()); + .put("name", "testCustomThreadPool").build(); + threadPool = new ThreadPool(nodeSettings); + ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + threadPool.setClusterSettings(clusterSettings); ThreadPoolInfo groups = threadPool.info(); boolean foundPool1 = false; boolean foundPool2 = false; @@ -316,7 +331,7 @@ public class UpdateThreadPoolSettingsTests extends ESTestCase { Settings settings = Settings.builder() .put("threadpool.my_pool2.size", "10") .build(); - threadPool.updateSettings(settings); + clusterSettings.applySettings(settings); groups = threadPool.info(); foundPool1 = false; diff --git a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index becb61666da..6599412834d 100644 --- a/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/core/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,12 +70,12 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { super.setUp(); threadPool = new ThreadPool(getClass().getName()); serviceA = build( - Settings.builder().put("name", "TS_A", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_A", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version0, new NamedWriteableRegistry() ); nodeA = new DiscoveryNode("TS_A", "TS_A", 
serviceA.boundAddress().publishAddress(), emptyMap(), version0); serviceB = build( - Settings.builder().put("name", "TS_B", TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(), + Settings.builder().put("name", "TS_B", TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING").build(), version1, new NamedWriteableRegistry() ); nodeB = new DiscoveryNode("TS_B", "TS_B", serviceB.boundAddress().publishAddress(), emptyMap(), version1); @@ -650,9 +651,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { includeSettings = "test"; excludeSettings = "DOESN'T_MATCH"; } - - serviceA.applySettings(Settings.builder() - .put(TransportService.SETTING_TRACE_LOG_INCLUDE, includeSettings, TransportService.SETTING_TRACE_LOG_EXCLUDE, excludeSettings) + ClusterSettings service = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + serviceA.setDynamicSettings(service); + service.applySettings(Settings.builder() + .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), includeSettings, TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), excludeSettings) .build()); tracer.reset(4); diff --git a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index 3f140b388fd..7a3fd88f93b 100644 --- a/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/core/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.transport; import org.elasticsearch.Version; import org.elasticsearch.cache.recycler.MockPageCacheRecycler; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; @@ -28,7 +29,6 @@ import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.netty.NettyTransport; @@ -64,7 +64,7 @@ public class NettySizeHeaderFrameDecoderTests extends ESTestCase { @Before public void startThreadPool() { threadPool = new ThreadPool(settings); - threadPool.setNodeSettingsService(new NodeSettingsService(settings)); + threadPool.setClusterSettings(new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)); NetworkService networkService = new NetworkService(settings); BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT, new NamedWriteableRegistry()); diff --git a/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java new file mode 100644 index 00000000000..d587ab05e45 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/transport/TransportModuleTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.inject.ModuleTestCase; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.transport.AssertingLocalTransport; +import org.elasticsearch.threadpool.ThreadPool; + +/** Unit tests for module registering custom transport and transport service */ +public class TransportModuleTests extends ModuleTestCase { + + + + static class FakeTransport extends AssertingLocalTransport { + @Inject + public FakeTransport(Settings settings, ThreadPool threadPool, Version version, NamedWriteableRegistry namedWriteableRegistry) { + super(settings, threadPool, version, namedWriteableRegistry); + } + } + + static class FakeTransportService extends TransportService { + @Inject + public FakeTransportService(Settings settings, Transport transport, ThreadPool threadPool) { + super(settings, transport, threadPool); + } + } +} diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java index c84a9eb9a55..78caef498d1 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportIT.java @@ -21,13 +21,14 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.logging.ESLogger; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; @@ -40,7 +41,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.transport.RequestHandlerRegistry; -import org.elasticsearch.transport.TransportModule; import org.elasticsearch.transport.TransportRequest; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; @@ -66,7 +66,7 @@ public class 
NettyTransportIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return settingsBuilder().put(super.nodeSettings(nodeOrdinal)) .put("node.mode", "network") - .put(TransportModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); + .put(NetworkModule.TRANSPORT_TYPE_KEY, "exception-throwing").build(); } @Override @@ -99,8 +99,8 @@ public class NettyTransportIT extends ESIntegTestCase { public String description() { return "an exception throwing transport for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransport("exception-throwing", ExceptionThrowingNettyTransport.class); + public void onModule(NetworkModule module) { + module.registerTransport("exception-throwing", ExceptionThrowingNettyTransport.class); } } diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java index 59ef26c42af..ee49012291d 100644 --- a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortIntegrationIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; @@ -31,7 +32,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.test.junit.annotations.Network; -import org.elasticsearch.transport.TransportModule; import java.net.InetAddress; import java.util.Locale; @@ -60,7 +60,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { Settings.Builder builder = settingsBuilder() .put(super.nodeSettings(nodeOrdinal)) .put("network.host", "127.0.0.1") - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", "network") .put("transport.profiles.client1.port", randomPortRange) .put("transport.profiles.client1.publish_host", "127.0.0.7") @@ -72,7 +72,7 @@ public class NettyTransportMultiPortIntegrationIT extends ESIntegTestCase { public void testThatTransportClientCanConnect() throws Exception { Settings settings = settingsBuilder() .put("cluster.name", internalCluster().getClusterName()) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("path.home", createTempDir().toString()) .build(); try (TransportClient transportClient = TransportClient.builder().settings(settings).build()) { diff --git a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java index 3437701f6c9..ea67ce32717 100644 --- 
a/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java +++ b/core/src/test/java/org/elasticsearch/transport/netty/NettyTransportPublishAddressIT.java @@ -21,22 +21,19 @@ package org.elasticsearch.transport.netty; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.network.NetworkUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.InetSocketTransportAddress; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.transport.TransportModule; import java.net.Inet4Address; -import java.net.Inet6Address; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.not; /** * Checks that Elasticsearch produces a sane publish_address when it binds to @@ -48,7 +45,7 @@ public class NettyTransportPublishAddressIT extends ESIntegTestCase { protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(TransportModule.TRANSPORT_TYPE_KEY, "netty") + .put(NetworkModule.TRANSPORT_TYPE_KEY, "netty") .put("node.mode", "network").build(); } diff --git a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java index 28a3dea118e..1350dcbb8ed 100644 --- a/core/src/test/java/org/elasticsearch/tribe/TribeIT.java +++ b/core/src/test/java/org/elasticsearch/tribe/TribeIT.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; import org.elasticsearch.node.Node; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.NodeConfigurationSource; @@ -47,6 +48,7 @@ import org.junit.BeforeClass; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Map; @@ -79,6 +81,11 @@ public class TribeIT extends ESIntegTestCase { return Settings.builder().put(Node.HTTP_ENABLED, false).build(); } + @Override + public Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.emptyList(); + } + @Override public Settings transportClientSettings() { return null; @@ -86,7 +93,7 @@ public class TribeIT extends ESIntegTestCase { }; cluster2 = new InternalTestCluster(InternalTestCluster.configuredNodeMode(), randomLong(), createTempDir(), 2, 2, - Strings.randomBase64UUID(getRandom()), nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, true); + Strings.randomBase64UUID(getRandom()), nodeConfigurationSource, 0, false, SECOND_CLUSTER_NODE_PREFIX, Collections.emptyList()); cluster2.beforeTest(getRandom(), 0.1); cluster2.ensureAtLeastNumDataNodes(2); diff --git a/core/src/test/java/org/elasticsearch/update/UpdateIT.java b/core/src/test/java/org/elasticsearch/update/UpdateIT.java index a789bb48774..09887d83541 --- a/core/src/test/java/org/elasticsearch/update/UpdateIT.java +++
b/core/src/test/java/org/elasticsearch/update/UpdateIT.java @@ -120,7 +120,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, Object> params) { return new Object(); // unused } @@ -218,7 +218,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, Object> params) { return script; } @@ -309,7 +309,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, Object> params) { return new Object(); // unused } @@ -400,7 +400,7 @@ public class UpdateIT extends ESIntegTestCase { } @Override - public Object compile(String script) { + public Object compile(String script, Map<String, Object> params) { return new Object(); // unused } diff --git a/core/src/test/resources/indices/bwc/index-2.0.2.zip b/core/src/test/resources/indices/bwc/index-2.0.2.zip new file mode 100644 index 00000000000..2f77405a831 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.0.2.zip differ diff --git a/core/src/test/resources/indices/bwc/index-2.1.1.zip b/core/src/test/resources/indices/bwc/index-2.1.1.zip new file mode 100644 index 00000000000..74c967d2c61 Binary files /dev/null and b/core/src/test/resources/indices/bwc/index-2.1.1.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.0.2.zip b/core/src/test/resources/indices/bwc/repo-2.0.2.zip new file mode 100644 index 00000000000..696ffd939d5 Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.0.2.zip differ diff --git a/core/src/test/resources/indices/bwc/repo-2.1.1.zip b/core/src/test/resources/indices/bwc/repo-2.1.1.zip new file mode 100644 index 00000000000..3253da62c3f Binary files /dev/null and b/core/src/test/resources/indices/bwc/repo-2.1.1.zip differ diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 6890f7c49d3..248326700c4 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -47,6 +47,7 @@ To use it, first create a `BulkProcessor` instance: [source,java] -------------------------------------------------- +import org.elasticsearch.action.bulk.BackoffPolicy; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -73,6 +74,8 @@ BulkProcessor bulkProcessor = BulkProcessor.builder( .setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) <6> .setFlushInterval(TimeValue.timeValueSeconds(5)) <7> .setConcurrentRequests(1) <8> + .setBackoffPolicy( + BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(100), 3)) <9> .build(); -------------------------------------------------- <1> Add your elasticsearch client @@ -86,6 +89,10 @@ BulkProcessor bulkProcessor = BulkProcessor.builder( <7> We want to flush the bulk every 5 seconds whatever the number of requests <8> Set the number of concurrent requests. A value of 0 means that only a single request will be allowed to be executed. A value of 1 means 1 concurrent request is allowed to be executed while accumulating new bulk requests. +<9> Set a custom backoff policy which will initially wait for 100ms, increase exponentially and retry up to three + times.
A retry is attempted whenever one or more bulk item requests have failed with an `EsRejectedExecutionException` + which indicates that there were too few compute resources available for processing the request. To disable backoff, + pass `BackoffPolicy.noBackoff()`. Then you can simply add your requests to the `BulkProcessor`: @@ -101,6 +108,7 @@ By default, `BulkProcessor`: * sets bulkSize to `5mb` * does not set flushInterval * sets concurrentRequests to 1 +* sets backoffPolicy to an exponential backoff with 8 retries and a start delay of 50ms. The total wait time is roughly 5.1 seconds. When all documents are loaded to the `BulkProcessor` it can be closed by using `awaitClose` or `close` methods: diff --git a/docs/java-api/query-dsl/missing-query.asciidoc b/docs/java-api/query-dsl/missing-query.asciidoc deleted file mode 100644 index 00086cf737b..00000000000 --- a/docs/java-api/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -[[java-query-dsl-missing-query]] -==== Missing Query - -See {ref}/query-dsl-missing-query.html[Missing Query] - -[source,java] -------------------------------------------------- -QueryBuilder qb = missingQuery("user", <1> - true, <2> - true); <3> -------------------------------------------------- -<1> field -<2> find missing field with an explicit `null` value -<3> find missing field that doesn’t exist diff --git a/docs/java-api/query-dsl/term-level-queries.asciidoc b/docs/java-api/query-dsl/term-level-queries.asciidoc index 44fc3639072..e7d5ad4e52b 100644 --- a/docs/java-api/query-dsl/term-level-queries.asciidoc +++ b/docs/java-api/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value. -<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/plugins/authors.asciidoc b/docs/plugins/authors.asciidoc index 75b7776ec09..9461ba8dd53 100644 --- a/docs/plugins/authors.asciidoc +++ b/docs/plugins/authors.asciidoc @@ -17,7 +17,7 @@ listed in this documentation for inspiration. ==================================== The example site plugin mentioned above contains all of the scaffolding needed -for integrating with Maven builds. If you don't plan on using Maven, then all +for integrating with Gradle builds. If you don't plan on using Gradle, then all you really need in your plugin is: * The `plugin-descriptor.properties` file @@ -33,14 +33,14 @@ All plugins, be they site or Java plugins, must contain a file called `plugin-descriptor.properties` in the root directory. The format for this file is described in detail here: -https://github.com/elastic/elasticsearch/blob/master/dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties[`dev-tools/src/main/resources/plugin-metadata/plugin-descriptor.properties`]. +https://github.com/elastic/elasticsearch/blob/master/buildSrc/src/main/resources/plugin-descriptor.properties[`/buildSrc/src/main/resources/plugin-descriptor.properties`].
Either fill in this template yourself (see https://github.com/lmenezes/elasticsearch-kopf/blob/master/plugin-descriptor.properties[elasticsearch-kopf] -as an example) or, if you are using Elasticsearch's Maven build system, you -can fill in the necessary values in the `pom.xml` for your plugin. For +as an example) or, if you are using Elasticsearch's Gradle build system, you +can fill in the necessary values in the `build.gradle` file for your plugin. For instance, see -https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/pom.xml[`plugins/site-example/pom.xml`]. +https://github.com/elastic/elasticsearch/blob/master/plugins/site-example/build.gradle[`/plugins/site-example/build.gradle`]. [float] ==== Mandatory elements for all plugins @@ -123,13 +123,13 @@ Read more in {ref}/integration-tests.html#changing-node-configuration[Changing N === Java Security permissions Some plugins may need additional security permissions. A plugin can include -the optional `plugin-security.policy` file containing `grant` statements for -additional permissions. Any additional permissions will be displayed to the user -with a large warning, and they will have to confirm them when installing the +the optional `plugin-security.policy` file containing `grant` statements for +additional permissions. Any additional permissions will be displayed to the user +with a large warning, and they will have to confirm them when installing the plugin interactively. So if possible, it is best to avoid requesting any spurious permissions! -If you are using the elasticsearch Maven build system, place this file in +If you are using the elasticsearch Gradle build system, place this file in `src/main/plugin-metadata` and it will be applied during unit tests as well. Keep in mind that the Java security model is stack-based, and the additional diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc new file mode 100644 index 00000000000..114dbf13035 --- /dev/null +++ b/docs/plugins/repository-hdfs.asciidoc @@ -0,0 +1,118 @@ +[[repository-hdfs]] +=== Hadoop HDFS Repository Plugin + +The HDFS repository plugin adds support for using the HDFS File System as a repository for +{ref}/modules-snapshots.html[Snapshot/Restore]. + +[[repository-hdfs-install]] +[float] +==== Installation + +This plugin can be installed through the plugin manager using _one_ of the following packages: + +[source,sh] +---------------------------------------------------------------- +sudo bin/plugin install repository-hdfs +sudo bin/plugin install repository-hdfs-hadoop2 +sudo bin/plugin install repository-hdfs-lite +---------------------------------------------------------------- + +The chosen plugin must be installed on every node in the cluster, and each node must +be restarted after installation. + +[[repository-hdfs-remove]] +[float] +==== Removal + +The plugin can be removed by specifying the _installed_ package using _one_ of the following commands: + +[source,sh] +---------------------------------------------------------------- +sudo bin/plugin remove repository-hdfs +sudo bin/plugin remove repository-hdfs-hadoop2 +sudo bin/plugin remove repository-hdfs-lite +---------------------------------------------------------------- + +The node must be stopped before removing the plugin.
+ +[[repository-hdfs-usage]] +==== Getting started with HDFS + +The HDFS snapshot/restore plugin comes in three _flavors_: + +* Default / Hadoop 1.x:: +The default version contains the plugin jar alongside Apache Hadoop 1.x (stable) dependencies. +* YARN / Hadoop 2.x:: +The `hadoop2` version contains the plugin jar plus the Apache Hadoop 2.x (also known as YARN) dependencies. +* Lite:: +The `lite` version contains just the plugin jar, without any Hadoop dependencies. The user should provide these (read below). + +[[repository-hdfs-flavor]] +===== What version to use? + +It depends on whether Hadoop is installed locally and, if not, whether it is compatible with Apache Hadoop clients. + +* Are you using Apache Hadoop (or a _compatible_ distro) and do not have it installed on the Elasticsearch nodes?:: ++ +If the answer is yes, use the default `repository-hdfs` for Apache Hadoop 1, or `repository-hdfs-hadoop2` for Apache Hadoop 2. ++ +* If you have Hadoop installed locally on the Elasticsearch nodes or are using a certain distro:: ++ +Use the `lite` version and place your Hadoop _client_ jars and their dependencies in the plugin folder under `hadoop-libs`. +For large deployments, it is recommended to package the libraries in the plugin zip and deploy it manually across nodes +(thus avoiding having to set up the libraries on each node). + +[[repository-hdfs-security]] +==== Handling JVM Security and Permissions + +Out of the box, Elasticsearch runs in a JVM with the security manager turned _on_ to make sure that unsafe or sensitive actions +are allowed only from trusted code. Hadoop however is not really designed to run under one; it does not rely on privileged blocks +to execute sensitive code, of which it uses plenty. + +The `repository-hdfs` plugin provides the necessary permissions for both Apache Hadoop 1.x and 2.x (latest versions) to successfully +run in a secured JVM, as one can tell from the number of permissions required when installing the plugin. +However, using a certain Hadoop File-System (outside DFS), a certain distro, or operating system (in particular Windows) might require +additional permissions which are not provided by the plugin. + +In this case there are several workarounds: +* add the permission into `plugin-security.policy` (available in the plugin folder) + +* disable the security manager through the `es.security.manager.enabled=false` configuration setting - NOT RECOMMENDED + +If you find yourself in such a situation, please let us know what Hadoop distro version and OS you are using and what permission is missing +by raising an issue. Thank you! + +[[repository-hdfs-config]] +==== Configuration Properties + +Once installed, define the configuration for the `hdfs` repository through `elasticsearch.yml` or the +{ref}/modules-snapshots.html[REST API]: + +[source,yaml] +---- +repositories + hdfs: + uri: "hdfs://:/" \# optional - Hadoop file-system URI + path: "some/path" \# required - path within the file-system where data is stored/loaded + load_defaults: "true" \# optional - whether to load the default Hadoop configuration (default) or not + conf_location: "extra-cfg.xml" \# optional - Hadoop configuration XML to be loaded (use commas for multi values) + conf.
: "" \# optional - 'inlined' key=value added to the Hadoop configuration + concurrent_streams: 5 \# optional - the number of concurrent streams (defaults to 5) + compress: "false" \# optional - whether to compress the metadata or not (default) + chunk_size: "10mb" \# optional - chunk size (disabled by default) + +---- + +NOTE: Be careful when including a paths within the `uri` setting; Some implementations ignore them completely while +others consider them. In general, we recommend keeping the `uri` to a minimum and using the `path` element instead. + +[[repository-hdfs-other-fs]] +==== Plugging other file-systems + +Any HDFS-compatible file-systems (like Amazon `s3://` or Google `gs://`) can be used as long as the proper Hadoop +configuration is passed to the Elasticsearch plugin. In practice, this means making sure the correct Hadoop configuration +files (`core-site.xml` and `hdfs-site.xml`) and its jars are available in plugin classpath, just as you would with any +other Hadoop client or job. + +Otherwise, the plugin will only read the _default_, vanilla configuration of Hadoop and will not be able to recognized +the plugged-in file-system. diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 554fa34b033..5706fc74c12 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -18,10 +18,9 @@ The S3 repository plugin adds support for using S3 as a repository. The Azure repository plugin adds support for using Azure as a repository. -https://github.com/elastic/elasticsearch-hadoop/tree/master/repository-hdfs[Hadoop HDFS Repository]:: +<>:: -The Hadoop HDFS Repository plugin adds support for using an HDFS file system -as a repository. +The Hadoop HDFS Repository plugin adds support for using HDFS as a repository. [float] @@ -40,3 +39,5 @@ include::repository-azure.asciidoc[] include::repository-s3.asciidoc[] +include::repository-hdfs.asciidoc[] + diff --git a/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc index 293b51a0331..b64f5edbeb9 100644 --- a/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/delimited-payload-tokenfilter.asciidoc @@ -3,9 +3,7 @@ Named `delimited_payload_filter`. Splits tokens into tokens and payload whenever a delimiter character is found. -Example: "the|1 quick|2 fox|3" is split per default int to tokens `fox`, `quick` and `the` with payloads `1`, `2` and `3` respectively. - - +Example: "the|1 quick|2 fox|3" is split by default into tokens `the`, `quick`, and `fox` with payloads `1`, `2`, and `3` respectively. Parameters: diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 7d9bdc1b041..137b4ac48cd 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -87,6 +87,10 @@ The cluster health API accepts the following request parameters: A time based parameter controlling how long to wait if one of the wait_for_XXX are provided. Defaults to `30s`. +`local`:: + If `true` returns the local node information and does not provide + the state from master node. Default: `false`. 
+ The following is an example of getting the cluster health at the `shards` level: diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 08f4c900597..8ec58424730 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -38,6 +38,44 @@ last example will be: }' -------------------------------------------------- +Resetting persistent or transient settings can be done by assigning a +`null` value. If a transient setting is reset, the persistent setting +is applied if available. Otherwise Elasticsearch will fall back to the setting +defined in the configuration file or, if not present, to the default +value. Here is an example: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "discovery.zen.minimum_master_nodes" : null + } +}' +-------------------------------------------------- + +Reset settings will not be included in the cluster response. So +the response for the last example will be: + +[source,js] +-------------------------------------------------- +{ + "persistent" : {}, + "transient" : {} +} +-------------------------------------------------- + +Settings can also be reset using simple wildcards. For instance, to reset +all dynamic `discovery.zen` settings, a prefix can be used: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "discovery.zen.*" : null + } +}' +-------------------------------------------------- + Cluster wide settings can be returned using: [source,js] diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index a2b73a44842..56e9d4ddb91 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -129,19 +129,6 @@ specific index module: experimental[] Disables the purge of <> on the current index. -[[index.recovery.initial_shards]]`index.recovery.initial_shards`:: -+ --- -A primary shard is only recovered only if there are enough nodes available to -allocate sufficient replicas to form a quorum. It can be set to: - - * `quorum` (default) - * `quorum-1` (or `half`) - * `full` - * `full-1`. - * Number values are also supported, e.g. `1`. --- - [float] === Settings in other index modules diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 0ade819dc0c..ddec26b8030 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -112,7 +112,10 @@ Type name: `DFR` ==== IB similarity. http://lucene.apache.org/core/5_2_1/core/org/apache/lucene/search/similarities/IBSimilarity.html[Information -based model] . This similarity has the following options: +based model] . The algorithm is based on the concept that the information content in any symbolic 'distribution' +sequence is primarily determined by the repetitive usage of its basic elements. +For written texts this challenge would correspond to comparing the writing styles of different authors. +This similarity has the following options: [horizontal] `distribution`:: Possible values: `ll` and `spl`. @@ -138,11 +141,11 @@ Type name: `LMDirichlet` ==== LM Jelinek Mercer similarity. http://lucene.apache.org/core/5_2_1/core/org/apache/lucene/search/similarities/LMJelinekMercerSimilarity.html[LM -Jelinek Mercer similarity] .
This similarity has the following options: +Jelinek Mercer similarity] . The algorithm attempts to capture important patterns in the text, while leaving out noise. This similarity has the following options: [horizontal] `lambda`:: The optimal value depends on both the collection and the query. The optimal value is around `0.1` -for title queries and `0.7` for long queries. Default to `0.1`. +for title queries and `0.7` for long queries. Defaults to `0.1`. As the value approaches `0`, documents that match more query terms will be ranked higher than those that match fewer terms. Type name: `LMJelinekMercer` diff --git a/docs/reference/indices/shadow-replicas.asciidoc b/docs/reference/indices/shadow-replicas.asciidoc index da74a651242..0d589adb64a 100644 --- a/docs/reference/indices/shadow-replicas.asciidoc +++ b/docs/reference/indices/shadow-replicas.asciidoc @@ -104,9 +104,8 @@ settings API: `index.shared_filesystem.recover_on_any_node`:: Boolean value indicating whether the primary shards for the index should be - allowed to recover on any node in the cluster, regardless of the number of - replicas or whether the node has previously had the shard allocated to it - before. Defaults to `false`. + allowed to recover on any node in the cluster. If a node holding a copy of + the shard is found, recovery prefers that node. Defaults to `false`. === Node level settings related to shadow replicas diff --git a/docs/reference/indices/shard-stores.asciidoc b/docs/reference/indices/shard-stores.asciidoc index d4d385bd6dc..19acbc44d3f 100644 --- a/docs/reference/indices/shard-stores.asciidoc +++ b/docs/reference/indices/shard-stores.asciidoc @@ -52,8 +52,9 @@ The shard stores information is grouped by indices and shard ids. } }, "version": 4, <4> + "allocation_id": "2iNySv_OQVePRX-yaRH_lQ", <5> "allocation" : "primary" | "replica" | "unused", <6> - "store_exception": ... <5> + "store_exception": ... <7> }, ... ] @@ -66,7 +67,8 @@ The shard stores information is grouped by indices and shard ids. <3> The node information that hosts a copy of the store, the key is the unique node id.
<4> The version of the store copy -<5> The status of the store copy, whether it is used as a +<5> The allocation id of the store copy +<6> The status of the store copy, whether it is used as a primary, replica or not used at all -<6> Any exception encountered while opening the shard index or +<7> Any exception encountered while opening the shard index or from earlier engine failure diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index e38fc31cb37..b60c5f0510e 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -148,13 +148,14 @@ PUT my_index/my_type/1 [[match-pattern]] ==== `match_pattern` -The `match_pattern` parameter behaves just like the `match` parameter, but -supports full Java regular expression matching on the field name instead of -simple wildcards, for instance: +The `match_pattern` parameter adjusts the behavior of the `match` parameter +such that it supports full Java regular expression matching on the field name +instead of simple wildcards, for instance: [source,js] -------------------------------------------------- - "match_pattern": "^profit_\d+$" + "match_pattern": "regex", + "match": "^profit_\d+$" -------------------------------------------------- [[path-match-unmatch]] diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index 2c40f72bbea..bafc3e3f7d9 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -3,9 +3,8 @@ The `_field_names` field indexes the names of every field in a document that contains any value other than `null`. This field is used by the -<> and <> -queries to find documents that either have or don't have any non-+null+ value -for a particular field. +<> query to find documents that +either have or don't have any non-+null+ value for a particular field. The value of the `_field_name` field is accessible in queries, aggregations, and scripts: @@ -49,7 +48,6 @@ GET my_index/_search -------------------------- // AUTOSENSE -<1> Querying on the `_field_names` field (also see the <> and <> queries) +<1> Querying on the `_field_names` field (also see the <> query) <2> Aggregating on the `_field_names` field <3> Accessing the `_field_names` field in scripts (inline scripts must be <> for this example to work) - diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index 552ce66ded8..4d70d4a6ac5 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -53,7 +53,3 @@ IMPORTANT: The `null_value` needs to be the same datatype as the field. For instance, a `long` field cannot have a string `null_value`. String fields which are `analyzed` will also pass the `null_value` through the configured analyzer. - -Also see the <> for its `null_value` support. - - diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index d974847a98a..1f0c76e1b93 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -17,13 +17,13 @@ The geo_shape mapping maps geo_json geometry objects to the geo_shape type. To enable it, users must explicitly map fields to the geo_shape type. 
-[cols="<,<",options="header",] +[cols="<,<,<",options="header",] |======================================================================= -|Option |Description +|Option |Description| Default |`tree` |Name of the PrefixTree implementation to be used: `geohash` for -GeohashPrefixTree and `quadtree` for QuadPrefixTree. Defaults to -`geohash`. +GeohashPrefixTree and `quadtree` for QuadPrefixTree. +| `geohash` |`precision` |This parameter may be used instead of `tree_levels` to set an appropriate value for the `tree_levels` parameter. The value @@ -31,7 +31,8 @@ specifies the desired precision and Elasticsearch will calculate the best tree_levels value to honor this precision. The value should be a number followed by an optional distance unit. Valid distance units include: `in`, `inch`, `yd`, `yard`, `mi`, `miles`, `km`, `kilometers`, -`m`,`meters` (default), `cm`,`centimeters`, `mm`, `millimeters`. +`m`,`meters`, `cm`,`centimeters`, `mm`, `millimeters`. +| `meters` |`tree_levels` |Maximum number of layers to be used by the PrefixTree. This can be used to control the precision of shape representations and @@ -41,27 +42,40 @@ certain level of understanding of the underlying implementation, users may use the `precision` parameter instead. However, Elasticsearch only uses the tree_levels parameter internally and this is what is returned via the mapping API even if you use the precision parameter. +| `50m` + +|`strategy` |The strategy parameter defines the approach for how to +represent shapes at indexing and search time. It also influences the +capabilities available so it is recommended to let Elasticsearch set +this parameter automatically. There are two strategies available: +`recursive` and `term`. Term strategy supports point types only (the +`points_only` parameter will be automatically set to true) while +Recursive strategy supports all shape types. (IMPORTANT: see +<> for more detailed information) +| `recursive` |`distance_error_pct` |Used as a hint to the PrefixTree about how precise it should be. Defaults to 0.025 (2.5%) with 0.5 as the maximum -supported value. PERFORMANCE NOTE: This value will be default to 0 if a `precision` or +supported value. PERFORMANCE NOTE: This value will default to 0 if a `precision` or `tree_level` definition is explicitly defined. This guarantees spatial precision at the level defined in the mapping. This can lead to significant memory usage for high resolution shapes with low error (e.g., large shapes at 1m with < 0.001 error). To improve indexing performance (at the cost of query accuracy) explicitly define `tree_level` or `precision` along with a reasonable `distance_error_pct`, noting that large shapes will have greater false positives. +| `0.025` |`orientation` |Optionally define how to interpret vertex order for polygons / multipolygons. This parameter defines one of two coordinate system rules (Right-hand or Left-hand) each of which can be specified in three -different ways. 1. Right-hand rule (default): `right`, `ccw`, `counterclockwise`, +different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`, 2. Left-hand rule: `left`, `cw`, `clockwise`. The default orientation (`counterclockwise`) complies with the OGC standard which defines outer ring vertices in counterclockwise order with inner ring(s) vertices (holes) in clockwise order. Setting this parameter in the geo_shape mapping explicitly sets vertex order for the coordinate list of a geo_shape field but can be overridden in each individual GeoJSON document. 
+| `ccw` |`points_only` |Setting this option to `true` (defaults to `false`) configures the `geo_shape` field type for point shapes only (NOTE: Multi-Points are not @@ -70,18 +84,21 @@ yet supported). This optimizes index and search performance for the `geohash` an queries can not be executed on `geo_point` field types. This option bridges the gap by improving point performance on a `geo_shape` field so that `geo_shape` queries are optimal on a point only field. +| `false` |======================================================================= +[[prefix-trees]] [float] ==== Prefix trees To efficiently represent shapes in the index, Shapes are converted into -a series of hashes representing grid squares using implementations of a -PrefixTree. The tree notion comes from the fact that the PrefixTree uses -multiple grid layers, each with an increasing level of precision to -represent the Earth. +a series of hashes representing grid squares (commonly referred to as "rasters") +using implementations of a PrefixTree. The tree notion comes from the fact that +the PrefixTree uses multiple grid layers, each with an increasing level of +precision to represent the Earth. This can be thought of as increasing the level +of detail of a map or image at higher zoom levels. Multiple PrefixTree implementations are provided: @@ -100,6 +117,29 @@ longitude the resulting hash is a bit set. A tree level in a quad tree represents 2 bits in this bit set, one for each coordinate. The maximum amount of levels for the quad trees in Elasticsearch is 50. +[[spatial-strategy]] +[float] +===== Spatial strategies +The PrefixTree implementations rely on a SpatialStrategy for decomposing +the provided Shape(s) into approximated grid squares. Each strategy answers +the following: + +* What type of Shapes can be indexed? +* What types of Query Operations and Shapes can be used? +* Does it support more than one Shape per field? + +The following Strategy implementations (with corresponding capabilities) +are provided: + +[cols="<,<,<,<",options="header",] +|======================================================================= +|Strategy |Supported Shapes |Supported Queries |Multiple Shapes + +|`recursive` |<> |`INTERSECTS`, `DISJOINT`, `WITHIN`, `CONTAINS` |Yes +|`term` |<> |`INTERSECTS` |Yes + +|======================================================================= + [float] ===== Accuracy @@ -149,6 +189,7 @@ between index size and a reasonable level of precision of 50m at the equator. This allows for indexing tens of millions of shapes without overly bloating the resulting index too much relative to the input size. +[[input-structure]] [float] ==== Input Structure @@ -189,6 +230,7 @@ differs from many Geospatial APIs (e.g., Google Maps) that generally use the colloquial latitude, longitude (Y, X). ============================================= +[[point]] [float] ===== http://geojson.org/geojson-spec.html#id2[Point] diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 07f87037b07..b4bb06e236c 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -66,7 +66,7 @@ GET my_index/_search ==== Using `nested` fields for arrays of objects If you need to index arrays of objects and to maintain the independence of -each object in the array, you should used the `nested` datatype instead of the +each object in the array, you should use the `nested` datatype instead of the <> datatype. 
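As a brief, hedged sketch of that recommendation (index, type, and field names are hypothetical), the mapping simply declares the field as `nested`:

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "my_type": {
      "properties": {
        "user": {
          "type": "nested"
        }
      }
    }
  }
}
--------------------------------------------------
// AUTOSENSE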
Internally, nested objects index each object in the array as a separate hidden document, meaning that each nested object can be queried independently of the others, with the <>: @@ -110,7 +110,7 @@ GET my_index/_search "bool": { "must": [ { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "White" }} <2> + { "match": { "user.last": "Smith" }} <2> ] } } @@ -127,7 +127,7 @@ GET my_index/_search "bool": { "must": [ { "match": { "user.first": "Alice" }}, - { "match": { "user.last": "Smith" }} <3> + { "match": { "user.last": "White" }} <3> ] } }, @@ -137,14 +137,14 @@ GET my_index/_search "user.first": {} } } - } + } } } -------------------------------------------------- // AUTOSENSE <1> The `user` field is mapped as type `nested` instead of type `object`. -<2> This query doesn't match because `Alice` and `White` are not in the same nested object. +<2> This query doesn't match because `Alice` and `Smith` are not in the same nested object. <3> This query matches because `Alice` and `White` are in the same nested object. <4> `inner_hits` allow us to highlight the matching nested documents. diff --git a/docs/reference/migration/migrate_3_0.asciidoc b/docs/reference/migration/migrate_3_0.asciidoc index 1fbcc285bb0..0179e289b99 100644 --- a/docs/reference/migration/migrate_3_0.asciidoc +++ b/docs/reference/migration/migrate_3_0.asciidoc @@ -14,6 +14,7 @@ your application to Elasticsearch 3.0. * <> * <> * <> +* <> [[breaking_30_search_changes]] === Search changes @@ -213,6 +214,13 @@ float by default instead of a double. The reasoning is that floats should be more than enough for most cases but would decrease storage requirements significantly. +==== `_source`'s `format` option + +The `_source` mapping does not support the `format` option anymore. This option +will still be accepted for indices created before the upgrade to 3.0 for backward +compatibility, but it will have no effect. Indices created on or after 3.0 will +reject this option. + [[breaking_30_plugins]] === Plugin changes @@ -427,9 +435,9 @@ Use the `field(String, float)` method instead. ==== MissingQueryBuilder -The two individual setters for existence() and nullValue() were removed in favour of -optional constructor settings in order to better capture and validate their interdependent -settings at construction time. +The MissingQueryBuilder, which was deprecated in 2.2.0, is removed. As a replacement, use ExistsQueryBuilder +inside a mustNot() clause. So instead of using `new MissingQueryBuilder(name)` now use +`new BoolQueryBuilder().mustNot(new ExistsQueryBuilder(name))`. ==== NotQueryBuilder @@ -507,3 +515,24 @@ from `OsStats.Cpu#getPercent`. === Fields option Only stored fields are retrievable with this option. The fields option won't be able to load non stored fields from _source anymore. + +[[breaking_30_allocation]] +=== Primary shard allocation + +Previously, primary shards were only assigned if a quorum of shard copies were found (configurable using +`index.recovery.initial_shards`, now deprecated). In the case where a primary had only a single replica, quorum was defined +to be a single shard. This meant that any shard copy of an index with replication factor 1 could become primary, even if it +was a stale copy of the data on disk. This is now fixed by using allocation IDs. + +Allocation IDs assign unique identifiers to shard copies.
This allows the cluster to differentiate between multiple +copies of the same data and track which shards have been active, so that after a cluster restart, shard copies +containing only the most recent data can become primaries. + +==== `index.shared_filesystem.recover_on_any_node` changes + +The behavior of `index.shared_filesystem.recover_on_any_node = true` has been changed. Previously, in the case where no +shard copies could be found, an arbitrary node was chosen by potentially ignoring allocation deciders. Now, we take +balancing into account but don't assign the shard if the allocation deciders are not satisfied. The behavior has also changed +in the case where shard copies can be found. Previously, a node not holding the shard copy was chosen if none of the nodes +holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy, +even if none of the nodes holding a shard copy satisfy the allocation deciders. diff --git a/docs/reference/modules/cluster/shards_allocation.asciidoc b/docs/reference/modules/cluster/shards_allocation.asciidoc index 1daf131106d..b8073927a0f 100644 --- a/docs/reference/modules/cluster/shards_allocation.asciidoc +++ b/docs/reference/modules/cluster/shards_allocation.asciidoc @@ -22,9 +22,8 @@ Enable or disable allocation for specific kinds of shards: This setting does not affect the recovery of local primary shards when restarting a node. A restarted node that has a copy of an unassigned primary -shard will recover that primary immediately, assuming that the -<> setting is -satisfied. +shard will recover that primary immediately, assuming that its allocation id matches +one of the active allocation ids in the cluster state. -- diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 0ae3bf28ffa..404dce4a4ae 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -38,7 +38,7 @@ These documents would *not* match the above query: <3> The `user` field is missing completely. [float] -===== `null_value` mapping +==== `null_value` mapping If the field mapping includes the <> setting then explicit `null` values are replaced with the specified `null_value`. For @@ -70,3 +70,21 @@ no values in the `user` field and thus would not match the `exists` filter: { "foo": "bar" } -------------------------------------------------- +==== `missing` query + +The `missing` query has been removed because it can be advantageously replaced by an `exists` query inside a must_not +clause as follows: + +[source,js] +-------------------------------------------------- +"bool": { + "must_not": { + "exists": { + "field": "user" + } + } +} +-------------------------------------------------- + +This query returns documents that have no value in the `user` field. + diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index a6d13562bb0..d389380b781 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -104,7 +104,10 @@ shape: ==== Spatial Relations -The Query supports the following spatial relations: +The <> mapping parameter determines +which spatial relation operators may be used at search time. + +The following is a complete list of spatial relation operators available: * `INTERSECTS` - (default) Return all documents whose `geo_shape` field intersects the query geometry.
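A hedged sketch of supplying one of these operators in a query (the `location` field name and the envelope coordinates are hypothetical):

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "geo_shape": {
      "location": {
        "shape": {
          "type": "envelope",
          "coordinates": [[13.0, 53.0], [14.0, 52.0]]
        },
        "relation": "within"
      }
    }
  }
}
--------------------------------------------------
// AUTOSENSE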
diff --git a/docs/reference/query-dsl/missing-query.asciidoc b/docs/reference/query-dsl/missing-query.asciidoc deleted file mode 100644 index 648da068189..00000000000 --- a/docs/reference/query-dsl/missing-query.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -[[query-dsl-missing-query]] -=== Missing Query - -Returns documents that have only `null` values or no value in the original field: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { "field" : "user" } - } - } -} --------------------------------------------------- - -For instance, the following docs would match the above filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [] } <1> -{ "user": [null] } <2> -{ "foo": "bar" } <3> --------------------------------------------------- -<1> This field has no values. -<2> This field has no non-`null` values. -<3> The `user` field is missing completely. - -These documents would *not* match the above filter: - -[source,js] --------------------------------------------------- -{ "user": "jane" } -{ "user": "" } <1> -{ "user": "-" } <2> -{ "user": ["jane"] } -{ "user": ["jane", null ] } <3> --------------------------------------------------- -<1> An empty string is a non-`null` value. -<2> Even though the `standard` analyzer would emit zero tokens, the original field is non-`null`. -<3> This field has one non-`null` value. - -[float] -==== `null_value` mapping - -If the field mapping includes a <> then explicit `null` values -are replaced with the specified `null_value`. For instance, if the `user` field were mapped -as follows: - -[source,js] --------------------------------------------------- - "user": { - "type": "string", - "null_value": "_null_" - } --------------------------------------------------- - -then explicit `null` values would be indexed as the string `_null_`, and the -the following docs would *not* match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } --------------------------------------------------- - -However, these docs--without explicit `null` values--would still have -no values in the `user` field and thus would match the `missing` filter: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -[float] -===== `existence` and `null_value` parameters - -When the field being queried has a `null_value` mapping, then the behaviour of -the `missing` filter can be altered with the `existence` and `null_value` -parameters: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "missing" : { - "field" : "user", - "existence" : true, - "null_value" : false - } - } - } -} --------------------------------------------------- - - -`existence`:: -+ --- -When the `existence` parameter is set to `true` (the default), the missing -filter will include documents where the field has *no* values, ie: - -[source,js] --------------------------------------------------- -{ "user": [] } -{ "foo": "bar" } --------------------------------------------------- - -When set to `false`, these documents will not be included. 
--- - -`null_value`:: -+ --- -When the `null_value` parameter is set to `true`, the missing -filter will include documents where the field contains a `null` value, ie: - -[source,js] --------------------------------------------------- -{ "user": null } -{ "user": [null] } -{ "user": ["jane",null] } <1> --------------------------------------------------- -<1> Matches because the field contains a `null` value, even though it also contains a non-`null` value. - -When set to `false` (the default), these documents will not be included. --- - -NOTE: Either `existence` or `null_value` or both must be set to `true`. diff --git a/docs/reference/query-dsl/term-level-queries.asciidoc b/docs/reference/query-dsl/term-level-queries.asciidoc index 7e9f5e5ca3e..9c28a727b33 100644 --- a/docs/reference/query-dsl/term-level-queries.asciidoc +++ b/docs/reference/query-dsl/term-level-queries.asciidoc @@ -30,11 +30,6 @@ The queries in this group are: Find documents where the field specified contains any non-null value. -<>:: - - Find documents where the field specified does is missing or contains only - `null` values. - <>:: Find documents where the field specified contains terms which being with @@ -75,8 +70,6 @@ include::range-query.asciidoc[] include::exists-query.asciidoc[] -include::missing-query.asciidoc[] - include::prefix-query.asciidoc[] include::wildcard-query.asciidoc[] @@ -88,6 +81,3 @@ include::fuzzy-query.asciidoc[] include::type-query.asciidoc[] include::ids-query.asciidoc[] - - - diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index 6de6699984e..823bdb70d07 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -96,14 +96,6 @@ The `exists` filter has been replaced by the <>. It beh as a query in ``query context'' and as a filter in ``filter context'' (see <>). -[role="exclude",id="query-dsl-missing-filter"] -=== Missing Filter - -The `missing` filter has been replaced by the <>. It behaves -as a query in ``query context'' and as a filter in ``filter context'' (see -<>). - - [role="exclude",id="query-dsl-geo-bounding-box-filter"] === Geo Bounding Box Filter @@ -451,4 +443,3 @@ The `not` query has been replaced by using a `mustNot` clause in a Boolean query === Nested type The docs for the `nested` field datatype have moved to <>. - diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 2d8a1f8bc9a..da7d2e5ee4b 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -95,6 +95,8 @@ include::search/validate.asciidoc[] include::search/explain.asciidoc[] +include::search/profile.asciidoc[] + include::search/percolate.asciidoc[] include::search/field-stats.asciidoc[] diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc new file mode 100644 index 00000000000..b62d83eee6b --- /dev/null +++ b/docs/reference/search/profile.asciidoc @@ -0,0 +1,606 @@ +[[search-profile]] +== Profile API + +coming[2.2.0] + +experimental[] + +The Profile API provides detailed timing information about the execution of individual components +in a query. It gives the user insight into how queries are executed at a low level so that +the user can understand why certain queries are slow, and take steps to improve their slow queries. + +The output from the Profile API is *very* verbose, especially for complicated queries executed across +many shards. 
+Pretty-printing the response is recommended to help understand the output.
+
+[NOTE]
+=======================================
+The details provided by the Profile API directly expose Lucene class names and concepts, which means
+that complete interpretation of the results requires fairly advanced knowledge of Lucene. This
+page attempts to give a crash-course in how Lucene executes queries so that you can use the Profile API to successfully
+diagnose and debug queries, but it is only an overview. For complete understanding, please refer
+to Lucene's documentation and, in places, the code.
+
+With that said, a complete understanding is often not required to fix a slow query. It is usually
+sufficient to see that a particular component of a query is slow, without necessarily understanding why
+the `advance` phase of that query is the cause, for example.
+=======================================
+
+[float]
+=== Usage
+
+Any `_search` request can be profiled by adding a top-level `profile` parameter:
+
+[source,js]
+--------------------------------------------------
+curl -XGET 'localhost:9200/_search' -d '{
+  "profile": true,<1>
+  "query" : {
+    "match" : { "message" : "search test" }
+  }
+}'
+--------------------------------------------------
+<1> Setting the top-level `profile` parameter to `true` will enable profiling
+for the search.
+
+This will yield the following result:
+
+[source,js]
+--------------------------------------------------
+{
+   "took": 25,
+   "timed_out": false,
+   "_shards": {
+      "total": 1,
+      "successful": 1,
+      "failed": 0
+   },
+   "hits": {
+      "total": 1,
+      "max_score": 1,
+      "hits": [ ... ] <1>
+   },
+   "profile": {
+     "shards": [
+        {
+           "id": "[htuC6YnSSSmKFq5UBt0YMA][test][0]",
+           "searches": [
+              {
+                 "query": [
+                    {
+                       "query_type": "BooleanQuery",
+                       "lucene": "message:search message:test",
+                       "time": "15.52889800ms",
+                       "breakdown": {
+                          "score": 0,
+                          "next_doc": 24495,
+                          "match": 0,
+                          "create_weight": 8488388,
+                          "build_scorer": 7016015,
+                          "advance": 0
+                       },
+                       "children": [
+                          {
+                             "query_type": "TermQuery",
+                             "lucene": "message:search",
+                             "time": "4.938855000ms",
+                             "breakdown": {
+                                "score": 0,
+                                "next_doc": 18332,
+                                "match": 0,
+                                "create_weight": 2945570,
+                                "build_scorer": 1974953,
+                                "advance": 0
+                             }
+                          },
+                          {
+                             "query_type": "TermQuery",
+                             "lucene": "message:test",
+                             "time": "0.5016660000ms",
+                             "breakdown": {
+                                "score": 0,
+                                "next_doc": 0,
+                                "match": 0,
+                                "create_weight": 170534,
+                                "build_scorer": 331132,
+                                "advance": 0
+                             }
+                          }
+                       ]
+                    }
+                 ],
+                 "rewrite_time": 185002,
+                 "collector": [
+                    {
+                       "name": "SimpleTopScoreDocCollector",
+                       "reason": "search_top_hits",
+                       "time": "2.206529000ms"
+                    }
+                 ]
+              }
+           ]
+        }
+     ]
+   }
+}
+--------------------------------------------------
+<1> Search results are returned, but were omitted here for brevity.
+
+Even for a simple query, the response is relatively complicated. Let's break it down piece-by-piece before moving
+to more complex examples.
+
+First, the overall structure of the profile response is as follows:
+
+[source,js]
+--------------------------------------------------
+{
+   "profile": {
+        "shards": [
+           {
+              "id": "[htuC6YnSSSmKFq5UBt0YMA][test][0]",  <1>
+              "searches": [
+                 {
+                    "query": [...],             <2>
+                    "rewrite_time": 185002,     <3>
+                    "collector": [...]          <4>
+                 }
+              ]
+           }
+        ]
+   }
+}
+--------------------------------------------------
+<1> A profile is returned for each shard that participated in the response, and is identified
+by a unique ID.
+<2> Each profile contains a section which holds details about the query execution.
+<3> Each profile has a single time representing the cumulative rewrite time.
+<4> Each profile also contains a section about the Lucene Collectors which run the search.
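For Java API users, the same request can be issued from a client. This is a hedged sketch only: it assumes the 2.2-era `SearchRequestBuilder` exposes a `setProfile(boolean)` setter mirroring the REST parameter.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public class ProfileRequestExample {
    public static void profileSearch(Client client) {
        SearchResponse response = client.prepareSearch("test")
                .setQuery(QueryBuilders.matchQuery("message", "search test"))
                .setProfile(true) // assumption: same effect as "profile": true in the JSON body
                .get();
        // the raw response contains the "profile" section shown above
        System.out.println(response);
    }
}
--------------------------------------------------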
+
+Because a search request may be executed against one or more shards in an index, and a search may cover
+one or more indices, the top level element in the profile response is an array of `shard` objects.
+Each shard object lists its `id`, which uniquely identifies the shard. The ID's format is
+`[nodeID][indexName][shardID]`.
+
+The profile itself may consist of one or more "searches", where a search is a query executed against the underlying
+Lucene index. Most Search Requests submitted by the user will only execute a single `search` against the Lucene index.
+But occasionally multiple searches will be executed, such as a search that includes a global aggregation (which needs
+to execute a secondary "match_all" query for the global context).
+
+Inside each `search` object there will be two arrays of profiled information:
+a `query` array and a `collector` array. In the future, more sections may be added, such as `suggest`, `highlight`,
+`aggregations`, etc.
+
+There will also be a `rewrite` metric showing the total time spent rewriting the query (in nanoseconds).
+
+=== `query` Section
+
+The `query` section contains detailed timing of the query tree executed by Lucene on a particular shard.
+The overall structure of this query tree will resemble your original Elasticsearch query, but may be slightly
+(or sometimes very) different. It will also use similar but not always identical naming. Using our previous
+`match` query example, let's analyze the `query` section:
+
+[source,js]
+--------------------------------------------------
+"query": [
+    {
+       "query_type": "BooleanQuery",
+       "lucene": "message:search message:test",
+       "time": "15.52889800ms",
+       "breakdown": {...},               <1>
+       "children": [
+          {
+             "query_type": "TermQuery",
+             "lucene": "message:search",
+             "time": "4.938855000ms",
+             "breakdown": {...}
+          },
+          {
+             "query_type": "TermQuery",
+             "lucene": "message:test",
+             "time": "0.5016660000ms",
+             "breakdown": {...}
+          }
+       ]
+    }
+]
+--------------------------------------------------
+<1> The breakdown timings are omitted for simplicity.
+
+Based on the profile structure, we can see that our `match` query was rewritten by Lucene into a BooleanQuery with two
+clauses (both holding a TermQuery). The `"query_type"` field displays the Lucene class name, and often aligns with
+the equivalent name in Elasticsearch. The `"lucene"` field displays the Lucene explanation text for the query, and
+is made available to help differentiate between parts of your query (e.g. both `"message:search"` and `"message:test"`
+are TermQuerys and would otherwise appear identical).
+
+The `"time"` field shows that this query took ~15ms for the entire BooleanQuery to execute. The recorded time is inclusive
+of all children.
+
+The `"breakdown"` field will give detailed stats about how the time was spent; we'll look at
+that in a moment. Finally, the `"children"` array lists any sub-queries that may be present. Because we searched for two
+values ("search test"), our BooleanQuery holds two children TermQueries. They have identical information (query_type, time,
+breakdown, etc). Children are allowed to have their own children.
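To connect the profile output back to Lucene, the rewritten form roughly corresponds to building the following query by hand. This is a sketch against the Lucene 5.x-era API that Elasticsearch 2.x ships with:

[source,java]
--------------------------------------------------
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

public class RewrittenFormExample {
    public static BooleanQuery rewrittenForm() {
        // the match query on "search test" becomes a BooleanQuery
        // with two optional (SHOULD) TermQuery clauses
        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(new TermQuery(new Term("message", "search")), Occur.SHOULD);
        builder.add(new TermQuery(new Term("message", "test")), Occur.SHOULD);
        // toString() prints "message:search message:test",
        // matching the "lucene" field in the profile output
        return builder.build();
    }
}
--------------------------------------------------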
+
+==== Timing Breakdown
+
+The `breakdown` component lists detailed timing statistics about low-level Lucene execution:
+
+[source,js]
+--------------------------------------------------
+"breakdown": {
+       "score": 0,
+       "next_doc": 24495,
+       "match": 0,
+       "create_weight": 8488388,
+       "build_scorer": 7016015,
+       "advance": 0
+}
+--------------------------------------------------
+
+Timings are listed in wall-clock nanoseconds and are not normalized at all. All caveats about the overall
+`time` apply here. The intention of the breakdown is to give you a feel for A) what machinery in Lucene is
+actually eating time, and B) the magnitude of differences in times between the various components. Like the overall time,
+the breakdown is inclusive of all children times.
+
+The meaning of each stat is as follows:
+
+[float]
+=== All parameters:
+
+[horizontal]
+`create_weight`::
+
+    A Query in Lucene must be capable of reuse across multiple IndexSearchers (think of it as the engine that
+    executes a search against a specific Lucene Index). This puts Lucene in a tricky spot, since many queries
+    need to accumulate temporary state/statistics associated with the index it is being used against, but the
+    Query contract mandates that it must be immutable.
+    {empty} +
+    {empty} +
+    To get around this, Lucene asks each query to generate a Weight object which acts as a temporary context
+    object to hold state associated with this particular (IndexSearcher, Query) tuple. The `create_weight` metric
+    shows how long this process takes.
+
+`build_scorer`::
+
+    This parameter shows how long it takes to build a Scorer for the query. A Scorer is the mechanism that
+    iterates over matching documents and generates a score per document (e.g. how well does "foo" match the document?).
+    Note, this records the time required to generate the Scorer object, not to actually score the documents. Some
+    queries have faster or slower initialization of the Scorer, depending on optimizations, complexity, etc.
+    {empty} +
+    {empty} +
+    This may also show timing associated with caching, if enabled and/or applicable for the query.
+
+`next_doc`::
+
+    The Lucene method `next_doc` returns the Doc ID of the next document matching the query. This statistic shows
+    the time it takes to determine which document is the next match, a process that varies considerably depending
+    on the nature of the query. `next_doc` is a specialized form of `advance()` which is more convenient for many
+    queries in Lucene. It is equivalent to `advance(docId() + 1)`.
+
+`advance`::
+
+    `advance` is the "lower level" version of `next_doc`: it serves the same purpose of finding the next matching
+    doc, but requires the calling query to perform extra tasks such as identifying and moving past skips, etc.
+    However, not all queries can use `next_doc`, so `advance` is also timed for those queries.
+    {empty} +
+    {empty} +
+    Conjunctions (e.g. `must` clauses in a boolean) are typical consumers of `advance`.
+
+`matches`::
+
+    Some queries, such as phrase queries, match documents using a "Two Phase" process. First, the document is
+    "approximately" matched, and if it matches approximately, it is checked a second time with a more rigorous
+    (and expensive) process. The second phase verification is what the `matches` statistic measures.
+    {empty} +
+    {empty} +
+    For example, a phrase query first checks a document approximately by ensuring all terms in the phrase are
+    present in the doc. If all the terms are present, it then executes the second phase verification to ensure
+    the terms are in-order to form the phrase, which is relatively more expensive than just checking for presence
+    of the terms.
+    {empty} +
+    {empty} +
+    Because this two-phase process is only used by a handful of queries, the `matches` statistic will often be zero.
+
+`score`::
+
+    This records the time taken to score a particular document via its Scorer.
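To see where these stats attach, here is a minimal sketch of the Lucene machinery being timed, written against the Lucene 5.x-era API bundled with Elasticsearch 2.x. The method names are real; the loop is a simplification (it ignores two-phase iteration, so `advance` and `matches` are not exercised):

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public class BreakdownWalkthrough {
    public static void walk(IndexSearcher searcher, Query query) throws IOException {
        Weight weight = searcher.createNormalizedWeight(query, true); // create_weight
        for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
            Scorer scorer = weight.scorer(leaf);                      // build_scorer
            if (scorer == null) {
                continue; // no matching docs in this segment
            }
            int doc = scorer.nextDoc();                               // next_doc
            while (doc != DocIdSetIterator.NO_MORE_DOCS) {
                float score = scorer.score();                         // score
                System.out.println(doc + " -> " + score);
                doc = scorer.nextDoc();                               // next_doc
            }
        }
    }
}
--------------------------------------------------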
+
+=== `collectors` Section
+
+The Collectors portion of the response shows high-level execution details. Lucene works by defining a "Collector"
+which is responsible for coordinating the traversal, scoring and collection of matching documents. Collectors
+are also how a single query can record aggregation results, execute unscoped "global" queries, execute post-query
+filters, etc.
+
+Looking at the previous example:
+
+[source,js]
+--------------------------------------------------
+"collector": [
+    {
+       "name": "SimpleTopScoreDocCollector",
+       "reason": "search_top_hits",
+       "time": "2.206529000ms"
+    }
+]
+--------------------------------------------------
+
+We see a single collector named `SimpleTopScoreDocCollector`. This is the default "scoring and sorting" Collector
+used by Elasticsearch. The `"reason"` field attempts to give a plain English description of the class name. The
+`"time"` is similar to the time in the Query tree: a wall-clock time inclusive of all children. Similarly, `children` lists
+all sub-collectors.
+
+It should be noted that Collector times are **independent** from the Query times. They are calculated, combined
+and normalized independently! Due to the nature of Lucene's execution, it is impossible to "merge" the times
+from the Collectors into the Query section, so they are displayed in separate portions.
+
+For reference, the various collector reasons are:
+
+[horizontal]
+`search_sorted`::
+
+    A collector that scores and sorts documents. This is the most common collector and will be seen in most
+    simple searches.
+
+`search_count`::
+
+    A collector that only counts the number of documents that match the query, but does not fetch the source.
+    This is seen when `size: 0` or `search_type=count` is specified.
+
+`search_terminate_after_count`::
+
+    A collector that terminates search execution after `n` matching documents have been found. This is seen
+    when the `terminate_after_count` query parameter has been specified.
+
+`search_min_score`::
+
+    A collector that only returns matching documents that have a score greater than `n`. This is seen when
+    the top-level parameter `min_score` has been specified.
+
+`search_multi`::
+
+    A collector that wraps several other collectors. This is seen when combinations of search, aggregations,
+    global aggs and post_filters are combined in a single search.
+
+`search_timeout`::
+
+    A collector that halts execution after a specified period of time. This is seen when a `timeout` top-level
+    parameter has been specified.
+
+`aggregation`::
+
+    A collector that Elasticsearch uses to run aggregations against the query scope. A single `aggregation`
+    collector is used to collect documents for *all* aggregations, so you will see a list of aggregations
+    in the name rather than a single one.
+
+`global_aggregation`::
+
+    A collector that executes an aggregation against the global query scope, rather than the specified query.
+    Because the global scope is necessarily different from the executed query, it must execute its own
+    match_all query (which you will see added to the Query section) to collect your entire dataset.
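To make the Collector concept concrete, here is a hedged sketch of a minimal custom Collector against the same Lucene 5.x-era API: roughly what a `search_count`-style collector does, counting matching documents without scoring them.

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.search.SimpleCollector;

public class CountingCollector extends SimpleCollector {
    private int count = 0;

    @Override
    public void collect(int doc) throws IOException {
        count++; // invoked once for every document that matches the query
    }

    @Override
    public boolean needsScores() {
        return false; // lets Lucene skip scoring work entirely
    }

    public int getCount() {
        return count;
    }
}
--------------------------------------------------

It would be driven via `searcher.search(query, new CountingCollector())`; the profiler wraps collectors like this one to record the `time` and `reason` you see in the response.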
+
+
+=== `rewrite` Section
+
+All queries in Lucene undergo a "rewriting" process. A query (and its sub-queries) may be rewritten one or
+more times, and the process continues until the query stops changing. This process allows Lucene to perform
+optimizations, such as removing redundant clauses, replacing one query with a more efficient one,
+etc. For example, a Boolean -> Boolean -> TermQuery can be rewritten to a TermQuery, because all the Booleans
+are unnecessary in this case.
+
+The rewriting process is complex and difficult to display, since queries can change drastically. Rather than
+showing the intermediate results, the total rewrite time is simply displayed as a value (in nanoseconds). This
+value is cumulative and contains the total time for all queries being rewritten.
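The rewrite loop itself is easy to sketch. `Query.rewrite(IndexReader)` is a real Lucene method; the fixpoint loop below is a simplification of what `IndexSearcher.rewrite` does, and this repeated work is what the cumulative `rewrite_time` measures:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;

public class RewriteExample {
    public static Query fullyRewrite(IndexReader reader, Query query) throws IOException {
        Query rewritten = query.rewrite(reader);
        while (rewritten != query) { // rewrite until the query stops changing
            query = rewritten;
            rewritten = query.rewrite(reader);
        }
        return rewritten;
    }
}
--------------------------------------------------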
+
+=== A more complex example
+
+To demonstrate a slightly more complex query and the associated results, we can profile the following query:
+
+[source,js]
+--------------------------------------------------
+GET /test/_search
+{
+  "profile": true,
+  "query": {
+    "term": {
+      "message": {
+        "value": "search"
+      }
+    }
+  },
+  "aggs": {
+    "non_global_term": {
+      "terms": {
+        "field": "agg"
+      },
+      "aggs": {
+        "second_term": {
+          "terms": {
+            "field": "sub_agg"
+          }
+        }
+      }
+    },
+    "another_agg": {
+      "cardinality": {
+        "field": "aggB"
+      }
+    },
+    "global_agg": {
+      "global": {},
+      "aggs": {
+        "my_agg2": {
+          "terms": {
+            "field": "globalAgg"
+          }
+        }
+      }
+    }
+  },
+  "post_filter": {
+    "term": {
+      "my_field": "foo"
+    }
+  }
+}
+--------------------------------------------------
+
+This example has:
+
+- A query
+- A scoped aggregation
+- A global aggregation
+- A post_filter
+
+And the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "profile": {
+         "shards": [
+            {
+               "id": "[P6-vulHtQRWuD4YnubWb7A][test][0]",
+               "searches": [
+                  {
+                     "query": [
+                        {
+                           "query_type": "TermQuery",
+                           "lucene": "my_field:foo",
+                           "time": "0.4094560000ms",
+                           "breakdown": {
+                              "score": 0,
+                              "next_doc": 0,
+                              "match": 0,
+                              "create_weight": 31584,
+                              "build_scorer": 377872,
+                              "advance": 0
+                           }
+                        },
+                        {
+                           "query_type": "TermQuery",
+                           "lucene": "message:search",
+                           "time": "0.3037020000ms",
+                           "breakdown": {
+                              "score": 0,
+                              "next_doc": 5936,
+                              "match": 0,
+                              "create_weight": 185215,
+                              "build_scorer": 112551,
+                              "advance": 0
+                           }
+                        }
+                     ],
+                     "rewrite_time": 7208,
+                     "collector": [
+                        {
+                           "name": "MultiCollector",
+                           "reason": "search_multi",
+                           "time": "1.378943000ms",
+                           "children": [
+                              {
+                                 "name": "FilteredCollector",
+                                 "reason": "search_post_filter",
+                                 "time": "0.4036590000ms",
+                                 "children": [
+                                    {
+                                       "name": "SimpleTopScoreDocCollector",
+                                       "reason": "search_top_hits",
+                                       "time": "0.006391000000ms"
+                                    }
+                                 ]
+                              },
+                              {
+                                 "name": "BucketCollector: [[non_global_term, another_agg]]",
+                                 "reason": "aggregation",
+                                 "time": "0.9546020000ms"
+                              }
+                           ]
+                        }
+                     ]
+                  },
+                  {
+                     "query": [
+                        {
+                           "query_type": "MatchAllDocsQuery",
+                           "lucene": "*:*",
+                           "time": "0.04829300000ms",
+                           "breakdown": {
+                              "score": 0,
+                              "next_doc": 3672,
+                              "match": 0,
+                              "create_weight": 6311,
+                              "build_scorer": 38310,
+                              "advance": 0
+                           }
+                        }
+                     ],
+                     "rewrite_time": 1067,
+                     "collector": [
+                        {
+                           "name": "GlobalAggregator: [global_agg]",
+                           "reason": "aggregation_global",
+                           "time": "0.1226310000ms"
+                        }
+                     ]
+                  }
+               ]
+            }
+         ]
+   }
+}
+--------------------------------------------------
+
+As you can see, the output is significantly more verbose than before. All the major portions of the query are
+represented:
+
+1. The `TermQuery` on `message:search` represents the main `term` query.
+2. The `TermQuery` on `my_field:foo` represents the `post_filter` query.
+3. There is a `MatchAllDocsQuery` (\*:*) query which is being executed as a second, distinct search. This was
+not part of the query specified by the user, but is auto-generated by the global aggregation to provide a global query scope.
+
+The Collector tree is fairly straightforward, showing how a single MultiCollector wraps a FilteredCollector
+to execute the post_filter (which in turn wraps the normal scoring SimpleTopScoreDocCollector), and a BucketCollector
+to run all scoped aggregations. In the MatchAll search, there is a single GlobalAggregator to run the global aggregation.
+
+=== Performance Notes
+
+Like any profiler, the Profile API introduces a non-negligible overhead to query execution. The act of instrumenting
+low-level method calls such as `advance` and `next_doc` can be fairly expensive, since these methods are called
+in tight loops. Therefore, profiling should not be enabled in production settings by default, and should not
+be compared against non-profiled query times. Profiling is just a diagnostic tool.
+
+There are also cases where special Lucene optimizations are disabled, since they are not amenable to profiling. This
+could cause some queries to report larger relative times than their non-profiled counterparts, but in general should
+not have a drastic effect compared to other components in the profiled query.
+
+=== Limitations
+
+- Profiling statistics are currently not available for suggestions, highlighting, or `dfs_query_then_fetch`.
+- Detailed breakdown for aggregations is not currently available past the high-level overview provided
+from the Collectors.
+- The Profiler is still highly experimental. The Profiler is instrumenting parts of Lucene that were
+never designed to be exposed in this manner, and so all results should be viewed as a best effort to provide detailed
+diagnostics. We hope to improve this over time. If you find obviously wrong numbers, strange query structures or
+other bugs, please report them!
+
+=== Understanding MultiTermQuery output
+
+A special note needs to be made about the `MultiTermQuery` class of queries. This includes wildcards, regex and fuzzy
+queries. These queries emit very verbose responses, and are not overly structured.
+
+Essentially, these queries rewrite themselves on a per-segment basis. If you imagine the wildcard query `b*`, it technically
+can match any token that begins with the letter "b". It would be impossible to enumerate all possible combinations,
+so Lucene rewrites the query in the context of the segment being evaluated. E.g. one segment may contain the tokens
+`[bar, baz]`, so the query rewrites to a BooleanQuery combination of "bar" and "baz". Another segment may only have the
+token `[bakery]`, so the query rewrites to a single TermQuery for "bakery".
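A hedged sketch of the per-segment rewriting described above, using the real `WildcardQuery` and `Query.rewrite` APIs; the exact rewritten form depends on the rewrite method in effect and the terms present in each segment:

[source,java]
--------------------------------------------------
import java.io.IOException;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.WildcardQuery;

public class WildcardRewriteExample {
    public static Query rewriteWildcard(IndexReader reader) throws IOException {
        WildcardQuery wildcard = new WildcardQuery(new Term("body", "b*"));
        // On a segment containing [bar, baz] this may become a BooleanQuery of
        // two TermQuerys; on a segment containing only [bakery], a single TermQuery.
        return wildcard.rewrite(reader);
    }
}
--------------------------------------------------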
+
+Due to this dynamic, per-segment rewriting, the clean tree structure becomes distorted and no longer follows a clean
+"lineage" showing how one query rewrites into the next. At present, all we can do is apologize, and suggest you
+collapse the details for that query's children if it is too confusing. Luckily, all the timing statistics are correct,
+just not the physical layout in the response, so it is sufficient to just analyze the top-level MultiTermQuery and
+ignore its children if you find the details too tricky to interpret.
+
+Hopefully this will be fixed in future iterations, but it is a tricky problem to solve and still in-progress :)
\ No newline at end of file
diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc
index 825564d799d..e18593d21cc 100644
--- a/docs/reference/search/request/scroll.asciidoc
+++ b/docs/reference/search/request/scroll.asciidoc
@@ -98,7 +98,7 @@ curl -XGET 'localhost:9200/_search?scroll=1m' -d '
 {
     "sort": [
         "_doc"
-    }
+    ]
 }
 '
 --------------------------------------------------
diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc
index 8d0b6708979..14ab207c301 100644
--- a/docs/reference/search/request/sort.asciidoc
+++ b/docs/reference/search/request/sort.asciidoc
@@ -238,7 +238,7 @@ Format in `lat,lon`.
     "sort" : [
         {
             "_geo_distance" : {
-                "pin.location" : "-70,40",
+                "pin.location" : "40,-70",
                 "order" : "asc",
                 "unit" : "km"
             }
@@ -301,7 +301,7 @@ Multiple geo points can be passed as an array containing any `geo_point` format,
 [source,js]
 --------------------------------------------------
 "pin.location" : [[-70, 40], [-71, 42]]
-"pin.location" : [{"lat": -70, "lon": 40}, {"lat": -71, "lon": 42}]
+"pin.location" : [{"lat": 40, "lon": -70}, {"lat": 42, "lon": -71}]
 --------------------------------------------------
 
 and so forth.
 
diff --git a/docs/reference/testing/testing-framework.asciidoc b/docs/reference/testing/testing-framework.asciidoc
index 9c0e5f4f10d..e0b27733441 100644
--- a/docs/reference/testing/testing-framework.asciidoc
+++ b/docs/reference/testing/testing-framework.asciidoc
@@ -116,7 +116,7 @@ public class Mytests extends ESIntegTestCase {
 
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
-        return settingsBuilder().put(super.nodeSettings(nodeOrdinal))
+        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
                .put("node.mode", "network")
                .build();
     }
 
diff --git a/modules/lang-expression/build.gradle b/modules/lang-expression/build.gradle
index 9f62e34687d..5563fdafe36 100644
--- a/modules/lang-expression/build.gradle
+++ b/modules/lang-expression/build.gradle
@@ -33,6 +33,10 @@ dependencyLicenses {
   mapping from: /lucene-.*/, to: 'lucene'
 }
 
+// do we or do we not depend on asm-tree, that is the question
+// classes are missing, e.g.
org.objectweb.asm.tree.LabelNode +thirdPartyAudit.missingClasses = true + compileJava.options.compilerArgs << '-Xlint:-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index a7f93925119..cf6017a32ca 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -95,7 +95,7 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { // classloader created here final SecurityManager sm = System.getSecurityManager(); if (sm != null) { diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java index b05b9630a14..198558381d3 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java @@ -33,23 +33,23 @@ public class ExpressionTests extends ESSingleNodeTestCase { public void testNeedsScores() { IndexService index = createIndex("test", Settings.EMPTY, "type", "d", "type=double"); - + ExpressionScriptEngineService service = new ExpressionScriptEngineService(Settings.EMPTY); SearchLookup lookup = new SearchLookup(index.mapperService(), index.fieldData(), null); - Object compiled = service.compile("1.2"); + Object compiled = service.compile("1.2", Collections.emptyMap()); SearchScript ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertFalse(ss.needsScores()); - compiled = service.compile("doc['d'].value"); + compiled = service.compile("doc['d'].value", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertFalse(ss.needsScores()); - compiled = service.compile("1/_score"); + compiled = service.compile("1/_score", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertTrue(ss.needsScores()); - compiled = service.compile("doc['d'].value * _score"); + compiled = service.compile("doc['d'].value * _score", Collections.emptyMap()); ss = service.search(new CompiledScript(ScriptType.INLINE, "randomName", "expression", compiled), lookup, Collections.emptyMap()); assertTrue(ss.needsScores()); } diff --git a/modules/lang-groovy/build.gradle b/modules/lang-groovy/build.gradle index 341dcbf0d6c..7ffb5626d4a 100644 --- a/modules/lang-groovy/build.gradle +++ b/modules/lang-groovy/build.gradle @@ -35,3 +35,12 @@ integTest { systemProperty 'es.script.indexed', 'on' } } + +// classes are missing, e.g. 
jline.console.completer.Completer +thirdPartyAudit.missingClasses = true +thirdPartyAudit.excludes = [ + // uses internal java api: sun.misc.Unsafe + 'groovy.json.internal.FastStringUtils', + 'groovy.json.internal.FastStringUtils$StringImplementation$1', + 'groovy.json.internal.FastStringUtils$StringImplementation$2', +] diff --git a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 85f57694ce6..1ce5a2ab761 100644 --- a/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/modules/lang-groovy/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -165,7 +165,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri } @Override - public Object compile(String script) { + public Object compile(String script, Map params) { try { // we reuse classloader, so do a security check just in case. SecurityManager sm = System.getSecurityManager(); diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java index 2883b74cc1d..a1faea0b5e5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/BucketSelectorTests.java @@ -45,12 +45,14 @@ import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.derivative; import static org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders.having; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; @ESIntegTestCase.SuiteScopeTestCase public class BucketSelectorTests extends ESIntegTestCase { @@ -74,6 +76,7 @@ public class BucketSelectorTests extends ESIntegTestCase { public void setupSuiteScopeCluster() throws Exception { createIndex("idx"); createIndex("idx_unmapped"); + createIndex("idx_with_gaps"); interval = randomIntBetween(1, 50); numDocs = randomIntBetween(10, 500); @@ -84,6 +87,10 @@ public class BucketSelectorTests extends ESIntegTestCase { for (int docs = 0; docs < numDocs; docs++) { builders.add(client().prepareIndex("idx", "type").setSource(newDocBuilder())); } + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(1, 1, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(1, 2, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(3, 1, 0, 0))); + builders.add(client().prepareIndex("idx_with_gaps", "type").setSource(newDocBuilder(3, 3, 0, 0))); client().preparePutIndexedScript().setId("my_script").setScriptLang(GroovyScriptEngineService.NAME) .setSource("{ \"script\": \"Double.isNaN(_value0) ? 
false : (_value0 + _value1 > 100)\" }").get(); @@ -93,12 +100,17 @@ public class BucketSelectorTests extends ESIntegTestCase { } private XContentBuilder newDocBuilder() throws IOException { + return newDocBuilder(randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber), + randomIntBetween(minNumber, maxNumber), randomIntBetween(minNumber, maxNumber)); + } + + private XContentBuilder newDocBuilder(int field1Value, int field2Value, int field3Value, int field4Value) throws IOException { XContentBuilder jsonBuilder = jsonBuilder(); jsonBuilder.startObject(); - jsonBuilder.field(FIELD_1_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_2_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_3_NAME, randomIntBetween(minNumber, maxNumber)); - jsonBuilder.field(FIELD_4_NAME, randomIntBetween(minNumber, maxNumber)); + jsonBuilder.field(FIELD_1_NAME, field1Value); + jsonBuilder.field(FIELD_2_NAME, field2Value); + jsonBuilder.field(FIELD_3_NAME, field3Value); + jsonBuilder.field(FIELD_4_NAME, field4Value); jsonBuilder.endObject(); return jsonBuilder; } @@ -451,4 +463,70 @@ public class BucketSelectorTests extends ESIntegTestCase { assertThat(field2SumValue + field3SumValue, greaterThan(100.0)); } } + + public void testEmptyBuckets() { + SearchResponse response = client().prepareSearch("idx_with_gaps") + .addAggregation(histogram("histo").field(FIELD_1_NAME).interval(1) + .subAggregation(histogram("inner_histo").field(FIELD_1_NAME).interval(1).extendedBounds(1l, 4l).minDocCount(0) + .subAggregation(derivative("derivative").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS)))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("1")); + Histogram innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + List innerBuckets = innerHisto.getBuckets(); + assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("2")); + innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + innerBuckets = innerHisto.getBuckets(); + assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat(bucket.getKeyAsString(), equalTo("3")); + innerHisto = bucket.getAggregations().get("inner_histo"); + assertThat(innerHisto, notNullValue()); + innerBuckets = innerHisto.getBuckets(); 
+ assertThat(innerBuckets, notNullValue()); + assertThat(innerBuckets.size(), equalTo(4)); + for (int i = 0; i < innerBuckets.size(); i++) { + Histogram.Bucket innerBucket = innerBuckets.get(i); + if (i == 0) { + assertThat(innerBucket.getAggregations().get("derivative"), nullValue()); + } else { + assertThat(innerBucket.getAggregations().get("derivative"), notNullValue()); + } + } + } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java index 66a764dd75a..516514599ae 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/IndicesRequestTests.java @@ -85,6 +85,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; @@ -95,16 +96,32 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportService; import org.junit.After; import org.junit.Before; -import java.util.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.function.Supplier; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.*; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.instanceOf; @ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) public class IndicesRequestTests extends ESIntegTestCase { @@ -127,7 +144,7 @@ public class IndicesRequestTests extends ESIntegTestCase { protected Settings nodeSettings(int ordinal) { // must set this independently of the plugin so it overrides MockTransportService return Settings.builder().put(super.nodeSettings(ordinal)) - .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); + .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "intercepting").build(); } @Override @@ -756,8 +773,8 @@ public class IndicesRequestTests extends ESIntegTestCase { public String description() { return "an intercepting transport service for testing"; } - public void onModule(TransportModule transportModule) { - transportModule.addTransportService("intercepting", InterceptingTransportService.class); + public void onModule(NetworkModule module) { + 
module.registerTransportService("intercepting", InterceptingTransportService.class); } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java index c54510acd4e..98d53c85174 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/ScriptedMetricTests.java @@ -58,7 +58,6 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.sameInstance; @ClusterScope(scope = Scope.SUITE) @@ -739,6 +738,10 @@ public class ScriptedMetricTests extends ESIntegTestCase { ScriptedMetric scriptedMetric = bucket.getAggregations().get("scripted"); assertThat(scriptedMetric, notNullValue()); assertThat(scriptedMetric.getName(), equalTo("scripted")); - assertThat(scriptedMetric.aggregation(), nullValue()); + assertThat(scriptedMetric.aggregation(), notNullValue()); + assertThat(scriptedMetric.aggregation(), instanceOf(List.class)); + List aggregationResult = (List) scriptedMetric.aggregation(); + assertThat(aggregationResult.size(), equalTo(1)); + assertThat(aggregationResult.get(0), equalTo(0)); } } diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java index 8153d207b7c..5a56e0f6999 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SearchFieldsTests.java @@ -392,8 +392,7 @@ public class SearchFieldsTests extends ESIntegTestCase { createIndex("test"); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("_source").field("enabled", false).endObject() + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", false).endObject().startObject("properties") .startObject("byte_field").field("type", "byte").field("store", "yes").endObject() .startObject("short_field").field("type", "short").field("store", "yes").endObject() .startObject("integer_field").field("type", "integer").field("store", "yes").endObject() @@ -556,8 +555,7 @@ public class SearchFieldsTests extends ESIntegTestCase { createIndex("test"); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); - String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("_source").field("enabled", false).endObject() + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("_source").field("enabled", false).endObject().startObject("properties") .startObject("string_field").field("type", "string").endObject() .startObject("byte_field").field("type", "byte").endObject() .startObject("short_field").field("type", "short").endObject() diff --git 
a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java index 5a00bca9fac..de9b5b5f4f5 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/messy/tests/SimpleSortTests.java @@ -34,7 +34,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.text.StringAndBytesText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,58 +48,24 @@ import org.elasticsearch.script.groovy.GroovyPlugin; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.GeoDistanceSortBuilder; -import org.elasticsearch.search.sort.ScriptSortBuilder; -import org.elasticsearch.search.sort.SortBuilders; -import org.elasticsearch.search.sort.SortOrder; +import org.elasticsearch.search.sort.*; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.junit.annotations.TestLogging; import org.hamcrest.Matchers; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.List; -import java.util.Locale; +import java.util.*; import java.util.Map.Entry; -import java.util.Random; -import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutionException; +import static org.apache.lucene.util.GeoUtils.TOLERANCE; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.query.QueryBuilders.*; import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.fieldValueFactorFunction; import static org.elasticsearch.search.sort.SortBuilders.fieldSort; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFirstHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSecondHit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSortValues; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; -import static org.hamcrest.Matchers.closeTo; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static 
org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; -import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; - -import static org.apache.lucene.util.GeoUtils.TOLERANCE; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; +import static org.hamcrest.Matchers.*; /** * @@ -503,7 +469,7 @@ public class SimpleSortTests extends ESIntegTestCase { } public void testSimpleSorts() throws Exception { - Random random = getRandom(); + Random random = random(); assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("str_value").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? "doc_values" : null).endObject().endObject() @@ -1667,8 +1633,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(hits[i].getSortValues().length, is(1)); Object o = hits[i].getSortValues()[0]; assertThat(o, notNullValue()); - assertThat(o instanceof StringAndBytesText, is(true)); - StringAndBytesText text = (StringAndBytesText) o; + Text text = (Text) o; assertThat(text.string(), is("bar")); } @@ -1684,8 +1649,7 @@ public class SimpleSortTests extends ESIntegTestCase { assertThat(hits[i].getSortValues().length, is(1)); Object o = hits[i].getSortValues()[0]; assertThat(o, notNullValue()); - assertThat(o instanceof StringAndBytesText, is(true)); - StringAndBytesText text = (StringAndBytesText) o; + Text text = (Text) o; assertThat(text.string(), is("bar bar")); } } @@ -1959,7 +1923,7 @@ public class SimpleSortTests extends ESIntegTestCase { .addSort(fieldSort("str_field2").order(SortOrder.DESC).unmappedType("string")).get(); assertSortValues(resp, - new Object[] {new StringAndBytesText("bcd"), null}, + new Object[] {new Text("bcd"), null}, new Object[] {null, null}); resp = client().prepareSearch("test1", "test2") diff --git a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java index 5f91631c021..8d9279ca0dd 100644 --- a/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java +++ b/modules/lang-groovy/src/test/java/org/elasticsearch/script/groovy/GroovySecurityTests.java @@ -99,7 +99,7 @@ public class GroovySecurityTests extends ESTestCase { // filtered directly by our classloader assertFailure("getClass().getClassLoader().loadClass(\"java.lang.Runtime\").availableProcessors()", PrivilegedActionException.class); // unfortunately, we have access to other classloaders (due to indy mechanism needing getClassLoader permission) - // but we can't do much with them directly at least. + // but we can't do much with them directly at least. assertFailure("myobject.getClass().getClassLoader().loadClass(\"java.lang.Runtime\").availableProcessors()", SecurityException.class); assertFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\"year\").setAccessible(true)", SecurityException.class); assertFailure("d = new DateTime(); d.\"${'get' + 'Class'}\"()." 
+ @@ -133,9 +133,9 @@ public class GroovySecurityTests extends ESTestCase { vars.put("myarray", Arrays.asList("foo")); vars.put("myobject", new MyObject()); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script)), vars).run(); + se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script, Collections.emptyMap())), vars).run(); } - + public static class MyObject { public int getPrimitive() { return 0; } public Object getObject() { return "value"; } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java index 93172056071..78fc6571f12 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MustacheScriptEngineService.java @@ -85,7 +85,7 @@ public class MustacheScriptEngineService extends AbstractComponent implements Sc * @return a compiled template object for later execution. * */ @Override - public Object compile(String template) { + public Object compile(String template, Map params) { /** Factory to generate Mustache objects from. */ return (new JsonEscapingMustacheFactory()).compile(new FastStringReader(template), "query-template"); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java index ce29bf246be..8e8c8981493 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTests.java @@ -28,6 +28,7 @@ import org.junit.Before; import java.io.IOException; import java.io.StringWriter; import java.nio.charset.Charset; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -52,7 +53,7 @@ public class MustacheScriptEngineTests extends ESTestCase { + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}" + "}}, \"negative_boost\": {{boost_val}} } }}"; Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars).run(); + BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template, Collections.emptyMap())), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": \"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"solr\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); @@ -63,7 +64,7 @@ public class MustacheScriptEngineTests extends ESTestCase { Map vars = new HashMap<>(); vars.put("boost_val", "0.3"); vars.put("body_val", "\"quick brown\""); - BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template)), vars).run(); + BytesReference o = (BytesReference) qe.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "", "mustache", qe.compile(template, Collections.emptyMap())), vars).run(); assertEquals("GET _search {\"query\": {\"boosting\": {\"positive\": {\"match\": {\"body\": 
\"gift\"}}," + "\"negative\": {\"term\": {\"body\": {\"value\": \"\\\"quick brown\\\"\"}}}, \"negative_boost\": 0.3 } }}", new String(o.toBytes(), Charset.forName("UTF-8"))); diff --git a/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java b/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java index b189745408f..8395223f669 100644 --- a/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java +++ b/plugins/delete-by-query/src/main/java/org/elasticsearch/plugin/deletebyquery/DeleteByQueryPlugin.java @@ -22,14 +22,10 @@ package org.elasticsearch.plugin.deletebyquery; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; -import org.elasticsearch.common.inject.Module; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.rest.RestModule; import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction; -import java.util.Collection; -import java.util.Collections; - public class DeleteByQueryPlugin extends Plugin { public static final String NAME = "delete-by-query"; @@ -48,8 +44,8 @@ public class DeleteByQueryPlugin extends Plugin { actionModule.registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class); } - public void onModule(RestModule restModule) { - restModule.addRestAction(RestDeleteByQueryAction.class); + public void onModule(NetworkModule module) { + module.registerRestHandler(RestDeleteByQueryAction.class); } } diff --git a/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java b/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java index c44608c4e4b..57bfa4c2328 100644 --- a/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java +++ b/plugins/delete-by-query/src/test/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.text.StringText; +import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.CountDown; @@ -339,7 +339,7 @@ public class TransportDeleteByQueryActionTests extends ESSingleNodeTestCase { final int nbDocs = randomIntBetween(0, 20); SearchHit[] docs = new SearchHit[nbDocs]; for (int i = 0; i < nbDocs; i++) { - InternalSearchHit doc = new InternalSearchHit(randomInt(), String.valueOf(i), new StringText("type"), null); + InternalSearchHit doc = new InternalSearchHit(randomInt(), String.valueOf(i), new Text("type"), null); doc.shard(new SearchShardTarget("node", "test", randomInt())); docs[i] = doc; } diff --git a/plugins/discovery-azure/build.gradle b/plugins/discovery-azure/build.gradle index 5042824eb07..ce80a441760 100644 --- a/plugins/discovery-azure/build.gradle +++ b/plugins/discovery-azure/build.gradle @@ -37,15 +37,12 @@ dependencies { compile "commons-lang:commons-lang:2.6" compile 
"commons-io:commons-io:2.4" compile 'javax.mail:mail:1.4.5' - compile 'javax.activation:activation:1.1' compile 'javax.inject:javax.inject:1' compile "com.sun.jersey:jersey-client:${versions.jersey}" compile "com.sun.jersey:jersey-core:${versions.jersey}" compile "com.sun.jersey:jersey-json:${versions.jersey}" compile 'org.codehaus.jettison:jettison:1.1' compile 'com.sun.xml.bind:jaxb-impl:2.2.3-1' - compile 'javax.xml.bind:jaxb-api:2.2.2' - compile 'javax.xml.stream:stax-api:1.0-2' compile 'org.codehaus.jackson:jackson-core-asl:1.9.2' compile 'org.codehaus.jackson:jackson-mapper-asl:1.9.2' compile 'org.codehaus.jackson:jackson-jaxrs:1.9.2' @@ -57,7 +54,6 @@ dependencyLicenses { mapping from: /jackson-.*/, to: 'jackson' mapping from: /jersey-.*/, to: 'jersey' mapping from: /jaxb-.*/, to: 'jaxb' - mapping from: /stax-.*/, to: 'stax' } compileJava.options.compilerArgs << '-Xlint:-path,-serial,-static,-unchecked' @@ -66,3 +62,15 @@ compileJava.options.compilerArgs << '-Xlint:-deprecation' // TODO: and why does this static not show up in maven... compileTestJava.options.compilerArgs << '-Xlint:-static' +// classes are missing, e.g. org.osgi.framework.BundleActivator +thirdPartyAudit.missingClasses = true +// TODO: figure out what is happening and fix this!!!!!!!!!!! +// there might be still some undetected jar hell! +// we need to fix https://github.com/policeman-tools/forbidden-apis/issues/91 first +thirdPartyAudit.excludes = [ + // uses internal java api: com.sun.xml.fastinfoset.stax.StAXDocumentParser + 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector', + 'com.sun.xml.bind.v2.runtime.unmarshaller.FastInfosetConnector$CharSequenceImpl', + // uses internal java api: com.sun.xml.fastinfoset.stax.StAXDocumentSerializer + 'com.sun.xml.bind.v2.runtime.output.FastInfosetStreamWriterOutput', +] diff --git a/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 b/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 deleted file mode 100644 index c4ee8fa5eb8..00000000000 --- a/plugins/discovery-azure/licenses/activation-1.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6cb541461c2834bdea3eb920f1884d1eb508b50 diff --git a/plugins/discovery-azure/licenses/activation-LICENSE.txt b/plugins/discovery-azure/licenses/activation-LICENSE.txt deleted file mode 100644 index 1154e0aeec5..00000000000 --- a/plugins/discovery-azure/licenses/activation-LICENSE.txt +++ /dev/null @@ -1,119 +0,0 @@ -COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0 - -1. Definitions. - -1.1. Contributor means each individual or entity that creates or contributes to the creation of Modifications. - -1.2. Contributor Version means the combination of the Original Software, prior Modifications used by a Contributor (if any), and the Modifications made by that particular Contributor. - -1.3. Covered Software means (a) the Original Software, or (b) Modifications, or (c) the combination of files containing Original Software with files containing Modifications, in each case including portions thereof. - -1.4. Executable means the Covered Software in any form other than Source Code. - -1.5. Initial Developer means the individual or entity that first makes Original Software available under this License. - -1.6. Larger Work means a work which combines Covered Software or portions thereof with code not governed by the terms of this License. - -1.7. License means this document. - -1.8. 
Licensable means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently acquired, any and all of the rights conveyed herein. - -1.9. Modifications means the Source Code and Executable form of any of the following: - -A. Any file that results from an addition to, deletion from or modification of the contents of a file containing Original Software or previous Modifications; - -B. Any new file that contains any part of the Original Software or previous Modification; or - -C. Any new file that is contributed or otherwise made available under the terms of this License. - -1.10. Original Software means the Source Code and Executable form of computer software code that is originally released under this License. - -1.11. Patent Claims means any patent claim(s), now owned or hereafter acquired, including without limitation, method, process, and apparatus claims, in any patent Licensable by grantor. - -1.12. Source Code means (a) the common form of computer software code in which modifications are made and (b) associated documentation included in or with such code. - -1.13. You (or Your) means an individual or a legal entity exercising rights under, and complying with all of the terms of, this License. For legal entities, You includes any entity which controls, is controlled by, or is under common control with You. For purposes of this definition, control means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. - -2. License Grants. - -2.1. The Initial Developer Grant. -Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, the Initial Developer hereby grants You a world-wide, royalty-free, non-exclusive license: -(a) under intellectual property rights (other than patent or trademark) Licensable by Initial Developer, to use, reproduce, modify, display, perform, sublicense and distribute the Original Software (or portions thereof), with or without Modifications, and/or as part of a Larger Work; and -(b) under Patent Claims infringed by the making, using or selling of Original Software, to make, have made, use, practice, sell, and offer for sale, and/or otherwise dispose of the Original Software (or portions thereof). -(c) The licenses granted in Sections 2.1(a) and (b) are effective on the date Initial Developer first distributes or otherwise makes the Original Software available to a third party under the terms of this License. -(d) Notwithstanding Section 2.1(b) above, no patent license is granted: (1) for code that You delete from the Original Software, or (2) for infringements caused by: (i) the modification of the Original Software, or (ii) the combination of the Original Software with other software or devices. - -2.2. Contributor Grant.
-Conditioned upon Your compliance with Section 3.1 below and subject to third party intellectual property claims, each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: -(a) under intellectual property rights (other than patent or trademark) Licensable by Contributor to use, reproduce, modify, display, perform, sublicense and distribute the Modifications created by such Contributor (or portions thereof), either on an unmodified basis, with other Modifications, as Covered Software and/or as part of a Larger Work; and -(b) under Patent Claims infringed by the making, using, or selling of Modifications made by that Contributor either alone and/or in combination with its Contributor Version (or portions of such combination), to make, use, sell, offer for sale, have made, and/or otherwise dispose of: (1) Modifications made by that Contributor (or portions thereof); and (2) the combination of Modifications made by that Contributor with its Contributor Version (or portions of such combination). -(c) The licenses granted in Sections 2.2(a) and 2.2(b) are effective on the date Contributor first distributes or otherwise makes the Modifications available to a third party. -(d) Notwithstanding Section 2.2(b) above, no patent license is granted: (1) for any code that Contributor has deleted from the Contributor Version; (2) for infringements caused by: (i) third party modifications of Contributor Version, or (ii) the combination of Modifications made by that Contributor with other software (except as part of the Contributor Version) or other devices; or (3) under Patent Claims infringed by Covered Software in the absence of Modifications made by that Contributor. - -3. Distribution Obligations. - -3.1. Availability of Source Code. - -Any Covered Software that You distribute or otherwise make available in Executable form must also be made available in Source Code form and that Source Code form must be distributed only under the terms of this License. You must include a copy of this License with every copy of the Source Code form of the Covered Software You distribute or otherwise make available. You must inform recipients of any such Covered Software in Executable form as to how they can obtain such Covered Software in Source Code form in a reasonable manner on or through a medium customarily used for software exchange. - -3.2. Modifications. - -The Modifications that You create or to which You contribute are governed by the terms of this License. You represent that You believe Your Modifications are Your original creation(s) and/or You have sufficient rights to grant the rights conveyed by this License. - -3.3. Required Notices. -You must include a notice in each of Your Modifications that identifies You as the Contributor of the Modification. You may not remove or alter any copyright, patent or trademark notices contained within the Covered Software, or any notices of licensing or any descriptive text giving attribution to any Contributor or the Initial Developer. - -3.4. Application of Additional Terms. -You may not offer or impose any terms on any Covered Software in Source Code form that alters or restricts the applicable version of this License or the recipients' rights hereunder. You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, you may do so only on Your own behalf, and not on behalf of the Initial Developer or any Contributor.
You must make it absolutely clear that any such warranty, support, indemnity or liability obligation is offered by You alone, and You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of warranty, support, indemnity or liability terms You offer. - -3.5. Distribution of Executable Versions. -You may distribute the Executable form of the Covered Software under the terms of this License or under the terms of a license of Your choice, which may contain terms different from this License, provided that You are in compliance with the terms of this License and that the license for the Executable form does not attempt to limit or alter the recipients' rights in the Source Code form from the rights set forth in this License. If You distribute the Covered Software in Executable form under a different license, You must make it absolutely clear that any terms which differ from this License are offered by You alone, not by the Initial Developer or Contributor. You hereby agree to indemnify the Initial Developer and every Contributor for any liability incurred by the Initial Developer or such Contributor as a result of any such terms You offer. - -3.6. Larger Works. -You may create a Larger Work by combining Covered Software with other code not governed by the terms of this License and distribute the Larger Work as a single product. In such a case, You must make sure the requirements of this License are fulfilled for the Covered Software. - -4. Versions of the License. - -4.1. New Versions. -Sun Microsystems, Inc. is the initial license steward and may publish revised and/or new versions of this License from time to time. Each version will be given a distinguishing version number. Except as provided in Section 4.3, no one other than the license steward has the right to modify this License. - -4.2. Effect of New Versions. - -You may always continue to use, distribute or otherwise make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. If the Initial Developer includes a notice in the Original Software prohibiting it from being distributed or otherwise made available under any subsequent version of the License, You must distribute and make the Covered Software available under the terms of the version of the License under which You originally received the Covered Software. Otherwise, You may also choose to use, distribute or otherwise make the Covered Software available under the terms of any subsequent version of the License published by the license steward. -4.3. Modified Versions. - -When You are an Initial Developer and You want to create a new license for Your Original Software, You may create and use a modified version of this License if You: (a) rename the license and remove any references to the name of the license steward (except to note that the license differs from this License); and (b) otherwise make it clear that the license contains terms which differ from this License. - -5. DISCLAIMER OF WARRANTY. - -COVERED SOFTWARE IS PROVIDED UNDER THIS LICENSE ON AN AS IS BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED SOFTWARE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED SOFTWARE IS WITH YOU.
SHOULD ANY COVERED SOFTWARE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER. - -6. TERMINATION. - -6.1. This License and the rights granted hereunder will terminate automatically if You fail to comply with terms herein and fail to cure such breach within 30 days of becoming aware of the breach. Provisions which, by their nature, must remain in effect beyond the termination of this License shall survive. - -6.2. If You assert a patent infringement claim (excluding declaratory judgment actions) against Initial Developer or a Contributor (the Initial Developer or Contributor against whom You assert such claim is referred to as Participant) alleging that the Participant Software (meaning the Contributor Version where the Participant is a Contributor or the Original Software where the Participant is the Initial Developer) directly or indirectly infringes any patent, then any and all rights granted directly or indirectly to You by such Participant, the Initial Developer (if the Initial Developer is not the Participant) and all Contributors under Sections 2.1 and/or 2.2 of this License shall, upon 60 days notice from Participant terminate prospectively and automatically at the expiration of such 60 day notice period, unless if within such 60 day period You withdraw Your claim with respect to the Participant Software against such Participant either unilaterally or pursuant to a written agreement with Participant. - -6.3. In the event of termination under Sections 6.1 or 6.2 above, all end user licenses that have been validly granted by You or any distributor hereunder prior to termination (excluding licenses granted to You by any distributor) shall survive termination. - -7. LIMITATION OF LIABILITY. - -UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED SOFTWARE, OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOST PROFITS, LOSS OF GOODWILL, WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU. - -8. U.S. GOVERNMENT END USERS. - -The Covered Software is a commercial item, as that term is defined in 48 C.F.R. 2.101 (Oct. 1995), consisting of commercial computer software (as that term is defined at 48 C.F.R. 252.227-7014(a)(1)) and commercial computer software documentation as such terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995), all U.S. Government End Users acquire Covered Software with only those rights set forth herein. This U.S.
Government Rights clause is in lieu of, and supersedes, any other FAR, DFAR, or other clause or provision that addresses Government rights in computer software under this License. - -9. MISCELLANEOUS. - -This License represents the complete agreement concerning subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. This License shall be governed by the law of the jurisdiction specified in a notice contained within the Original Software (except to the extent applicable law, if any, provides otherwise), excluding such jurisdiction's conflict-of-law provisions. Any litigation relating to this License shall be subject to the jurisdiction of the courts located in the jurisdiction and venue specified in a notice contained within the Original Software, with the losing party responsible for costs, including, without limitation, court costs and reasonable attorneys' fees and expenses. The application of the United Nations Convention on Contracts for the International Sale of Goods is expressly excluded. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not apply to this License. You agree that You alone are responsible for compliance with the United States export administration regulations (and the export control laws and regulation of any other countries) when You use, distribute or otherwise make available any Covered Software. - -10. RESPONSIBILITY FOR CLAIMS. - -As between Initial Developer and the Contributors, each party is responsible for claims and damages arising, directly or indirectly, out of its utilization of rights under this License and You agree to work with Initial Developer and Contributors to distribute such responsibility on an equitable basis. Nothing herein is intended or shall be deemed to constitute any admission of liability. - -NOTICE PURSUANT TO SECTION 9 OF THE COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) -The GlassFish code released under the CDDL shall be governed by the laws of the State of California (excluding conflict-of-law provisions). Any litigation relating to this License shall be subject to the jurisdiction of the Federal Courts of the Northern District of California and the state courts of the State of California, with venue lying in Santa Clara County, California. - - - diff --git a/plugins/discovery-azure/licenses/activation-NOTICE.txt b/plugins/discovery-azure/licenses/activation-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/plugins/discovery-azure/licenses/activation-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 b/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 deleted file mode 100644 index a145d47cec9..00000000000 --- a/plugins/discovery-azure/licenses/jaxb-api-2.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aeb3021ca93dde265796d82015beecdcff95bf09 diff --git a/plugins/discovery-azure/licenses/stax-LICENSE.txt b/plugins/discovery-azure/licenses/stax-LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/plugins/discovery-azure/licenses/stax-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
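The AzureDiscovery, Ec2Discovery, and GceDiscovery hunks below all apply the same mechanical migration: the removed NodeSettingsService constructor argument is replaced by ClusterSettings, which is simply threaded through to the ZenDiscovery super-constructor. A minimal sketch of the resulting shape, using only the imports shown in those hunks (the class name CustomDiscovery is hypothetical):

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

public class CustomDiscovery extends ZenDiscovery {
    @Inject
    public CustomDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool,
                           TransportService transportService, ClusterService clusterService,
                           ClusterSettings clusterSettings, ZenPingService pingService,
                           DiscoverySettings discoverySettings, ElectMasterService electMasterService) {
        // ClusterSettings replaces NodeSettingsService as the registry for dynamic cluster settings
        super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings,
                pingService, electMasterService, discoverySettings);
    }
}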
diff --git a/plugins/discovery-azure/licenses/stax-NOTICE.txt b/plugins/discovery-azure/licenses/stax-NOTICE.txt deleted file mode 100644 index 8d1c8b69c3f..00000000000 --- a/plugins/discovery-azure/licenses/stax-NOTICE.txt +++ /dev/null @@ -1 +0,0 @@ - diff --git a/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 b/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 deleted file mode 100644 index fb00ad889b6..00000000000 --- a/plugins/discovery-azure/licenses/stax-api-1.0-2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d6337b0de8b25e53e81b922352fbea9f9f57ba0b diff --git a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java index 36b20b09fc1..89d6d17298f 100755 --- a/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java +++ b/plugins/discovery-azure/src/main/java/org/elasticsearch/discovery/azure/AzureDiscovery.java @@ -22,12 +22,12 @@ package org.elasticsearch.discovery.azure; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,9 +40,9 @@ public class AzureDiscovery extends ZenDiscovery { @Inject public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 77cfd6626d5..355dbc55164 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -48,3 +48,12 @@ test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } + +// classes are missing, e.g. 
org.apache.avalon.framework.logger.Logger +thirdPartyAudit.missingClasses = true +thirdPartyAudit.excludes = [ + // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl + // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault + // uses internal java api: com.sun.org.apache.xpath.internal.XPathContext + 'com.amazonaws.util.XpathUtils', +] diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java index ec1ffd54a77..b6306e6209c 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/cloud/aws/AwsEc2ServiceImpl.java @@ -32,6 +32,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cloud.aws.network.Ec2NameResolver; import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes; import org.elasticsearch.cluster.node.DiscoveryNodeService; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.network.NetworkService; @@ -119,7 +120,7 @@ public class AwsEc2ServiceImpl extends AbstractLifecycleComponent } // Increase the number of retries in case of 5xx API responses - final Random rand = new Random(); + final Random rand = Randomness.get(); RetryPolicy retryPolicy = new RetryPolicy( RetryPolicy.RetryCondition.NO_RETRY_CONDITION, new RetryPolicy.BackoffStrategy() { diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java index e94b7618d12..aa3cef01d03 100755 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java @@ -22,12 +22,12 @@ package org.elasticsearch.discovery.ec2; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,10 +40,10 @@ public class Ec2Discovery extends ZenDiscovery { @Inject public Ec2Discovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/discovery-gce/build.gradle 
b/plugins/discovery-gce/build.gradle index 4e6ade8788f..b054e0f37b8 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -31,3 +31,6 @@ test { // this is needed for insecure plugins, remove if possible! systemProperty 'tests.artifact', project.name } + +// classes are missing, e.g. org.apache.log.Logger +thirdPartyAudit.missingClasses = true diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java index f20d1c74f83..fe87b9244d4 100755 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceDiscovery.java @@ -22,12 +22,12 @@ package org.elasticsearch.discovery.gce; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.discovery.zen.elect.ElectMasterService; import org.elasticsearch.discovery.zen.ping.ZenPingService; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -40,10 +40,10 @@ public class GceDiscovery extends ZenDiscovery { @Inject public GceDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService, - ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService, + ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService, DiscoverySettings discoverySettings, ElectMasterService electMasterService) { - super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService, + super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings, pingService, electMasterService, discoverySettings); } } diff --git a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java index 33a4e55801b..825a8d358d9 100644 --- a/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java +++ b/plugins/lang-javascript/src/main/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineService.java @@ -62,7 +62,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements // one time initialization of rhino security manager integration private static final CodeSource DOMAIN; private static final int OPTIMIZATION_LEVEL = 1; - + static { try { DOMAIN = new CodeSource(new URL("file:" + BootstrapInfo.UNTRUSTED_CODEBASE), (Certificate[]) null); @@ -110,7 +110,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements if (securityDomain != DOMAIN) { throw new SecurityException("illegal securityDomain: " + securityDomain); } - + return super.createClassLoader(parent, securityDomain); } }); @@ -157,7 +157,7 @@ public class JavaScriptScriptEngineService extends AbstractComponent implements } @Override - public Object compile(String script) { + public Object compile(String 
script, Map<String, String> params) { Context ctx = Context.enter(); try { return ctx.compileString(script, generateScriptName(), 1, DOMAIN); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java index fe9cc324f1c..9d8357bb582 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptEngineTests.java @@ -29,6 +29,7 @@ import org.junit.After; import org.junit.Before; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -54,7 +55,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { public void testSimpleEquation() { Map<String, Object> vars = new HashMap<String, Object>(); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile("1 + 2")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "js", se.compile("1 + 2", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(3)); } @@ -64,20 +65,20 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> obj2 = MapBuilder.<String, Object>newMapBuilder().put("prop2", "value2").map(); Map<String, Object> obj1 = MapBuilder.<String, Object>newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map(); vars.put("obj1", obj1); - Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1")), vars).run(); + Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1", Collections.emptyMap())), vars).run(); assertThat(o, instanceOf(Map.class)); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); - o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1.l[0]")), vars).run(); + o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "js", se.compile("obj1.l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("2")); } public void testJavaScriptObjectToMap() { Map<String, Object> vars = new HashMap<String, Object>(); Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectToMap", "js", - se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1")), vars).run(); + se.compile("var obj1 = {}; obj1.prop1 = 'value1'; obj1.obj2 = {}; obj1.obj2.prop2 = 'value2'; obj1", Collections.emptyMap())), vars).run(); Map obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); @@ -92,7 +93,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { vars.put("ctx", ctx); ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptObjectMapInter", "js", - se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'")), vars); + se.compile("ctx.obj2 = {}; ctx.obj2.prop2 = 'value2'; ctx.obj1.prop1 = 'uvalue1'", Collections.emptyMap())), vars); executable.run(); ctx = (Map) executable.unwrap(vars.get("ctx"));
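// The tests above and below exercise the new two-argument compile contract: the second
// argument is a Map<String, String> of compile-time parameters, and Collections.emptyMap()
// selects the engine defaults. A sketch of a non-default call, borrowing the only parameter
// defined in this diff (Plan A's NUMERIC_OVERFLOW; "engine" is a hypothetical ScriptEngineService):
//   Map<String, String> compileParams =
//       Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "false");
//   Object compiled = engine.compile("x + y", compileParams);
// The JavaScript engine ignores the parameter map, so the empty map is all these tests need.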
assertThat(ctx.containsKey("obj1"), equalTo(true)); @@ -106,7 +107,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> doc = new HashMap<String, Object>(); ctx.put("doc", doc); - Object compiled = se.compile("ctx.doc.field1 = ['value1', 'value2']"); + Object compiled = se.compile("ctx.doc.field1 = ['value1', 'value2']", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testJavaScriptInnerArrayCreation", "js", compiled), new HashMap<String, Object>()); script.setNextVar("ctx", ctx); @@ -124,21 +125,21 @@ public class JavaScriptScriptEngineTests extends ESTestCase { vars.put("l", Arrays.asList("1", "2", "3", obj1)); Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l.length")), vars).run(); + se.compile("l.length", Collections.emptyMap())), vars).run(); assertThat(((Number) o).intValue(), equalTo(4)); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[0]")), vars).run(); + se.compile("l[0]", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("1")); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[3]")), vars).run(); + se.compile("l[3]", Collections.emptyMap())), vars).run(); obj1 = (Map) o; assertThat((String) obj1.get("prop1"), equalTo("value1")); assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2")); o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessInScript", "js", - se.compile("l[3].prop1")), vars).run(); + se.compile("l[3].prop1", Collections.emptyMap())), vars).run(); assertThat(((String) o), equalTo("value1")); } @@ -146,7 +147,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { Map<String, Object> vars = new HashMap<String, Object>(); Map<String, Object> ctx = new HashMap<String, Object>(); vars.put("ctx", ctx); - Object compiledScript = se.compile("ctx.value"); + Object compiledScript = se.compile("ctx.value", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "js", compiledScript), vars); @@ -161,7 +162,7 @@ public class JavaScriptScriptEngineTests extends ESTestCase { public void testChangingVarsCrossExecution2() { Map<String, Object> vars = new HashMap<String, Object>(); - Object compiledScript = se.compile("value"); + Object compiledScript = se.compile("value", Collections.emptyMap()); ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "js", compiledScript), vars); diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java index 2308e666c51..2aa6e13a99f 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptScriptMultiThreadedTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -40,7 +41,7 @@ import static org.hamcrest.Matchers.equalTo; public class JavaScriptScriptMultiThreadedTests
extends ESTestCase { public void testExecutableNoRuntimeParams() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; @@ -82,7 +83,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecutableWithRuntimeParams() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; @@ -124,7 +125,7 @@ public class JavaScriptScriptMultiThreadedTests extends ESTestCase { public void testExecute() throws Exception { final JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - final Object compiled = se.compile("x + y"); + final Object compiled = se.compile("x + y", Collections.emptyMap()); final AtomicBoolean failed = new AtomicBoolean(); Thread[] threads = new Thread[50]; diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java index c6f9805f818..dccc36d1bf7 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java +++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/JavaScriptSecurityTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.mozilla.javascript.EcmaError; import org.mozilla.javascript.WrappedException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -33,7 +34,7 @@ import java.util.Map; * Tests for the Javascript security permissions */ public class JavaScriptSecurityTests extends ESTestCase { - + private JavaScriptScriptEngineService se; @Override @@ -53,14 +54,14 @@ public class JavaScriptSecurityTests extends ESTestCase { /** runs a script */ private void doTest(String script) { Map<String, Object> vars = new HashMap<String, Object>(); - se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script)), vars).run(); + se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "js", se.compile(script, Collections.emptyMap())), vars).run(); } - + /** asserts that a script runs without exception */ private void assertSuccess(String script) { doTest(script); } - + /** assert that a security exception is hit */ private void assertFailure(String script, Class exceptionClass) { try { @@ -78,13 +79,13 @@ } } } - + /** Test some javascripts that are ok */ public void testOK() { assertSuccess("1 + 2"); assertSuccess("Math.cos(Math.PI)"); } - + /** Test some javascripts that should hit security exception */ public void testNotOK() throws Exception { // sanity check :) diff --git a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java index bb7eb31c85d..3445c116057 100644 --- a/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java
+++ b/plugins/lang-javascript/src/test/java/org/elasticsearch/script/javascript/SimpleBench.java @@ -25,6 +25,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptService; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -35,7 +36,7 @@ public class SimpleBench { public static void main(String[] args) { JavaScriptScriptEngineService se = new JavaScriptScriptEngineService(Settings.Builder.EMPTY_SETTINGS); - Object compiled = se.compile("x + y"); + Object compiled = se.compile("x + y", Collections.emptyMap()); CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "js", compiled); Map<String, Object> vars = new HashMap<String, Object>(); diff --git a/plugins/lang-plan-a/build.gradle b/plugins/lang-plan-a/build.gradle index 618c094f683..5f0ddafcc97 100644 --- a/plugins/lang-plan-a/build.gradle +++ b/plugins/lang-plan-a/build.gradle @@ -33,6 +33,9 @@ dependencies { compileJava.options.compilerArgs << '-Xlint:-cast,-fallthrough,-rawtypes' compileTestJava.options.compilerArgs << '-Xlint:-unchecked' +// classes are missing, e.g. org.objectweb.asm.tree.LabelNode +thirdPartyAudit.missingClasses = true + // regeneration logic, comes in via ant right now // don't port it to gradle, it works fine. diff --git a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java index 6b3cd834715..7795f74700b 100644 --- a/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java +++ b/plugins/lang-plan-a/src/main/java/org/elasticsearch/plan/a/PlanAScriptEngineService.java @@ -28,6 +28,7 @@ import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.ScriptEngineService; +import org.elasticsearch.script.ScriptException; import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; @@ -37,15 +38,16 @@ import java.security.AccessController; import java.security.Permissions; import java.security.PrivilegedAction; import java.security.ProtectionDomain; +import java.util.HashMap; import java.util.Map; public class PlanAScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "plan-a"; - // TODO: this should really be per-script since scripts do so many different things? - private static final CompilerSettings compilerSettings = new CompilerSettings(); - - public static final String NUMERIC_OVERFLOW = "plan-a.numeric_overflow"; + // default settings, used unless otherwise specified + private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings(); + + public static final String NUMERIC_OVERFLOW = "numeric_overflow"; // TODO: how should custom definitions be specified?
     private Definition definition = null;
@@ -53,7 +55,6 @@ public class PlanAScriptEngineService extends AbstractComponent implements Scrip
     @Inject
     public PlanAScriptEngineService(Settings settings) {
         super(settings);
-        compilerSettings.setNumericOverflow(settings.getAsBoolean(NUMERIC_OVERFLOW, compilerSettings.getNumericOverflow()));
     }
 
     public void setDefinition(final Definition definition) {
@@ -86,7 +87,23 @@ public class PlanAScriptEngineService extends AbstractComponent implements Scrip
     }
 
     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, String> params) {
+        final CompilerSettings compilerSettings;
+        if (params.isEmpty()) {
+            compilerSettings = DEFAULT_COMPILER_SETTINGS;
+        } else {
+            // custom settings
+            compilerSettings = new CompilerSettings();
+            Map<String, String> clone = new HashMap<>(params);
+            String value = clone.remove(NUMERIC_OVERFLOW);
+            if (value != null) {
+                // TODO: can we get a real boolean parser in here?
+                compilerSettings.setNumericOverflow(Boolean.parseBoolean(value));
+            }
+            if (!clone.isEmpty()) {
+                throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + clone);
+            }
+        }
         // check we ourselves are not being called by unprivileged code
         SecurityManager sm = System.getSecurityManager();
         if (sm != null) {
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java
index 94beac0c58c..4603a669df2 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowDisabledTests.java
@@ -19,17 +19,16 @@
 
 package org.elasticsearch.plan.a;
 
-import org.elasticsearch.common.settings.Settings;
+import java.util.Collections;
+import java.util.Map;
 
 /** Tests floating point overflow with numeric overflow disabled */
 public class FloatOverflowDisabledTests extends ScriptTestCase {
 
+    /** wire overflow to false for all tests */
     @Override
-    protected Settings getSettings() {
-        Settings.Builder builder = Settings.builder();
-        builder.put(super.getSettings());
-        builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, false);
-        return builder.build();
+    public Object exec(String script, Map<String, Object> vars) {
+        return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "false"));
     }
 
     public void testAssignmentAdditionOverflow() {
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java
index ff1c315628f..02a738de71e 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/FloatOverflowEnabledTests.java
@@ -19,17 +19,16 @@
 
 package org.elasticsearch.plan.a;
 
-import org.elasticsearch.common.settings.Settings;
+import java.util.Collections;
+import java.util.Map;
 
 /** Tests floating point overflow with numeric overflow enabled */
 public class FloatOverflowEnabledTests extends ScriptTestCase {
 
+    /** wire overflow to true for all tests */
     @Override
-    protected Settings getSettings() {
-        Settings.Builder builder = Settings.builder();
-        builder.put(super.getSettings());
-        builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, true);
-        return builder.build();
+    public Object exec(String script, Map<String, Object> vars) {
+        return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "true"));
     }
 
     public void testAssignmentAdditionOverflow() {
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java
index 279ea0616d9..dbffb11f0d0 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowDisabledTests.java
@@ -19,17 +19,16 @@
 
 package org.elasticsearch.plan.a;
 
-import org.elasticsearch.common.settings.Settings;
+import java.util.Collections;
+import java.util.Map;
 
 /** Tests integer overflow with numeric overflow disabled */
 public class IntegerOverflowDisabledTests extends ScriptTestCase {
 
+    /** wire overflow to false for all tests */
     @Override
-    protected Settings getSettings() {
-        Settings.Builder builder = Settings.builder();
-        builder.put(super.getSettings());
-        builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, false);
-        return builder.build();
+    public Object exec(String script, Map<String, Object> vars) {
+        return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "false"));
     }
 
     public void testAssignmentAdditionOverflow() {
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java
index 8abd2695915..cdab0e89fe6 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/IntegerOverflowEnabledTests.java
@@ -19,17 +19,16 @@
 
 package org.elasticsearch.plan.a;
 
-import org.elasticsearch.common.settings.Settings;
+import java.util.Collections;
+import java.util.Map;
 
 /** Tests integer overflow with numeric overflow enabled */
 public class IntegerOverflowEnabledTests extends ScriptTestCase {
 
+    /** wire overflow to true for all tests */
     @Override
-    protected Settings getSettings() {
-        Settings.Builder builder = Settings.builder();
-        builder.put(super.getSettings());
-        builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, true);
-        return builder.build();
+    public Object exec(String script, Map<String, Object> vars) {
+        return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, "true"));
     }
 
     public void testAssignmentAdditionOverflow() {
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java
index d2bbe02a625..e5084392f99 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptEngineTests.java
@@ -24,6 +24,7 @@ import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.ScriptService;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -78,7 +79,7 @@ public class ScriptEngineTests extends ScriptTestCase {
         Map<String, Object> ctx = new HashMap<>();
         vars.put("ctx", ctx);
 
-        Object compiledScript = scriptEngine.compile("return ((Map)input.get(\"ctx\")).get(\"value\");");
+        Object compiledScript = scriptEngine.compile("return ((Map)input.get(\"ctx\")).get(\"value\");", Collections.emptyMap());
         ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE,
             "testChangingVarsCrossExecution1", "plan-a", compiledScript), vars);
@@ -93,7 +94,7 @@ public class ScriptEngineTests extends ScriptTestCase {
     public void testChangingVarsCrossExecution2() {
         Map<String, Object> vars = new HashMap<>();
 
-        Object compiledScript = scriptEngine.compile("return input.get(\"value\");");
+        Object compiledScript = scriptEngine.compile("return input.get(\"value\");", Collections.emptyMap());
         ExecutableScript script = scriptEngine.executable(new CompiledScript(ScriptService.ScriptType.INLINE,
             "testChangingVarsCrossExecution2", "plan-a", compiledScript), vars);
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java
index 253e37183f3..5b4948036f3 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/ScriptTestCase.java
@@ -25,6 +25,7 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
 
+import java.util.Collections;
 import java.util.Map;
 
 /**
@@ -34,17 +35,10 @@ import java.util.Map;
  */
 public abstract class ScriptTestCase extends ESTestCase {
     protected PlanAScriptEngineService scriptEngine;
-
-    /** Override to provide different compiler settings */
-    protected Settings getSettings() {
-        Settings.Builder builder = Settings.builder();
-        builder.put(PlanAScriptEngineService.NUMERIC_OVERFLOW, random().nextBoolean());
-        return builder.build();
-    }
 
     @Before
     public void setup() {
-        scriptEngine = new PlanAScriptEngineService(getSettings());
+        scriptEngine = new PlanAScriptEngineService(Settings.EMPTY);
     }
 
     /** Compiles and returns the result of {@code script} */
@@ -54,7 +48,12 @@ public abstract class ScriptTestCase extends ESTestCase {
 
     /** Compiles and returns the result of {@code script} with access to {@code vars} */
     public Object exec(String script, Map<String, Object> vars) {
-        Object object = scriptEngine.compile(script);
+        return exec(script, vars, Collections.singletonMap(PlanAScriptEngineService.NUMERIC_OVERFLOW, Boolean.toString(random().nextBoolean())));
+    }
+
+    /** Compiles and returns the result of {@code script} with access to {@code vars} and compile-time parameters */
+    public Object exec(String script, Map<String, Object> vars, Map<String, String> compileParams) {
+        Object object = scriptEngine.compile(script, compileParams);
         CompiledScript compiled = new CompiledScript(ScriptService.ScriptType.INLINE, getTestName(), "plan-a", object);
         return scriptEngine.executable(compiled, vars).run();
     }
diff --git a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java
index de2c1c9ea3e..277778e7e76 100644
--- a/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java
+++ b/plugins/lang-plan-a/src/test/java/org/elasticsearch/plan/a/WhenThingsGoWrongTests.java
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.plan.a;
 
+import java.util.Collections;
+
 public class WhenThingsGoWrongTests extends ScriptTestCase {
     public void testNullPointer() {
         try {
@@ -38,4 +40,13 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
             fail("should have hit cce");
         } catch (ClassCastException expected) {}
     }
+
+    public void testBogusParameter() {
+        try {
+            exec("return 5;", null, Collections.singletonMap("bogusParameterKey", "bogusParameterValue"));
+            fail("should have hit IAE");
+        } catch (IllegalArgumentException expected) {
+            assertTrue(expected.getMessage().contains("Unrecognized compile-time parameter"));
+        }
+    }
 }
diff --git a/plugins/lang-python/build.gradle b/plugins/lang-python/build.gradle
index 269a3249386..1c33ad2d5ee 100644
--- a/plugins/lang-python/build.gradle
+++ b/plugins/lang-python/build.gradle
@@ -36,3 +36,380 @@ integTest {
   }
 }
 
+// classes are missing, e.g. org.tukaani.xz.FilterOptions
+thirdPartyAudit.missingClasses = true
+thirdPartyAudit.excludes = [
+  // uses internal java api: sun.security.x509 (X509CertInfo, X509CertImpl, X500Name)
+  'org.python.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator',
+
+  // uses internal java api: sun.misc.Cleaner
+  'org.python.netty.util.internal.Cleaner0',
+
+  // uses internal java api: sun.misc.Signal
+  'jnr.posix.JavaPOSIX',
+  'jnr.posix.JavaPOSIX$SunMiscSignalHandler',
+
+  // uses internal java api: sun.misc.Unsafe
+  'com.kenai.jffi.MemoryIO$UnsafeImpl',
+  'com.kenai.jffi.MemoryIO$UnsafeImpl32',
+  'com.kenai.jffi.MemoryIO$UnsafeImpl64',
+  'org.python.google.common.cache.Striped64',
+  'org.python.google.common.cache.Striped64$1',
+  'org.python.google.common.cache.Striped64$Cell',
+  'org.python.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+  'org.python.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+  'org.python.netty.util.internal.chmv8.ForkJoinPool$2',
+  'org.python.netty.util.internal.PlatformDependent0',
+  'org.python.netty.util.internal.UnsafeAtomicIntegerFieldUpdater',
+  'org.python.netty.util.internal.UnsafeAtomicLongFieldUpdater',
+  'org.python.netty.util.internal.UnsafeAtomicReferenceFieldUpdater',
+  'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8',
+  'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8$1',
+  'org.python.netty.util.internal.chmv8.ConcurrentHashMapV8$TreeBin',
+  'org.python.netty.util.internal.chmv8.CountedCompleter',
+  'org.python.netty.util.internal.chmv8.CountedCompleter$1',
+  'org.python.netty.util.internal.chmv8.ForkJoinPool',
+  'org.python.netty.util.internal.chmv8.ForkJoinPool$WorkQueue',
+  'org.python.netty.util.internal.chmv8.ForkJoinTask',
+  'org.python.netty.util.internal.chmv8.ForkJoinTask$1',
+
+  // "uberjaring" (but not shading) classes that have been in the JDK since 1.5
+  // nice job python.
+  'javax.xml.XMLConstants',
+  'javax.xml.datatype.DatatypeConfigurationException',
+  'javax.xml.datatype.DatatypeConstants$1',
+  'javax.xml.datatype.DatatypeConstants$Field',
+  'javax.xml.datatype.DatatypeConstants',
+  'javax.xml.datatype.DatatypeFactory',
+  'javax.xml.datatype.Duration',
+  'javax.xml.datatype.FactoryFinder',
+  'javax.xml.datatype.SecuritySupport$1',
+  'javax.xml.datatype.SecuritySupport$2',
+  'javax.xml.datatype.SecuritySupport$3',
+  'javax.xml.datatype.SecuritySupport$4',
+  'javax.xml.datatype.SecuritySupport$5',
+  'javax.xml.datatype.SecuritySupport',
+  'javax.xml.datatype.XMLGregorianCalendar',
+  'javax.xml.namespace.NamespaceContext',
+  'javax.xml.namespace.QName$1',
+  'javax.xml.namespace.QName',
+  'javax.xml.parsers.DocumentBuilder',
+  'javax.xml.parsers.DocumentBuilderFactory',
+  'javax.xml.parsers.FactoryConfigurationError',
+  'javax.xml.parsers.FactoryFinder',
+  'javax.xml.parsers.ParserConfigurationException',
+  'javax.xml.parsers.SAXParser',
+  'javax.xml.parsers.SAXParserFactory',
+  'javax.xml.parsers.SecuritySupport$1',
+  'javax.xml.parsers.SecuritySupport$2',
+  'javax.xml.parsers.SecuritySupport$3',
+  'javax.xml.parsers.SecuritySupport$4',
+  'javax.xml.parsers.SecuritySupport$5',
+  'javax.xml.parsers.SecuritySupport',
+  'javax.xml.stream.EventFilter',
+  'javax.xml.stream.FactoryConfigurationError',
+  'javax.xml.stream.FactoryFinder',
+  'javax.xml.stream.Location',
+  'javax.xml.stream.SecuritySupport$1',
+  'javax.xml.stream.SecuritySupport$2',
+  'javax.xml.stream.SecuritySupport$3',
+  'javax.xml.stream.SecuritySupport$4',
+  'javax.xml.stream.SecuritySupport$5',
+  'javax.xml.stream.SecuritySupport',
+  'javax.xml.stream.StreamFilter',
+  'javax.xml.stream.XMLEventFactory',
+  'javax.xml.stream.XMLEventReader',
+  'javax.xml.stream.XMLEventWriter',
+  'javax.xml.stream.XMLInputFactory',
+  'javax.xml.stream.XMLOutputFactory',
+  'javax.xml.stream.XMLReporter',
+  'javax.xml.stream.XMLResolver',
+  'javax.xml.stream.XMLStreamConstants',
+  'javax.xml.stream.XMLStreamException',
+  'javax.xml.stream.XMLStreamReader',
+  'javax.xml.stream.XMLStreamWriter',
+  'javax.xml.stream.events.Attribute',
+  'javax.xml.stream.events.Characters',
+  'javax.xml.stream.events.Comment',
+  'javax.xml.stream.events.DTD',
+  'javax.xml.stream.events.EndDocument',
+  'javax.xml.stream.events.EndElement',
+  'javax.xml.stream.events.EntityDeclaration',
+  'javax.xml.stream.events.EntityReference',
+  'javax.xml.stream.events.Namespace',
+  'javax.xml.stream.events.NotationDeclaration',
+  'javax.xml.stream.events.ProcessingInstruction',
+  'javax.xml.stream.events.StartDocument',
+  'javax.xml.stream.events.StartElement',
+  'javax.xml.stream.events.XMLEvent',
+  'javax.xml.stream.util.EventReaderDelegate',
+  'javax.xml.stream.util.StreamReaderDelegate',
+  'javax.xml.stream.util.XMLEventAllocator',
+  'javax.xml.stream.util.XMLEventConsumer',
+  'javax.xml.transform.ErrorListener',
+  'javax.xml.transform.FactoryFinder',
+  'javax.xml.transform.OutputKeys',
+  'javax.xml.transform.Result',
+  'javax.xml.transform.SecuritySupport$1',
+  'javax.xml.transform.SecuritySupport$2',
+  'javax.xml.transform.SecuritySupport$3',
+  'javax.xml.transform.SecuritySupport$4',
+  'javax.xml.transform.SecuritySupport$5',
+  'javax.xml.transform.SecuritySupport',
+  'javax.xml.transform.Source',
+  'javax.xml.transform.SourceLocator',
+  'javax.xml.transform.Templates',
+  'javax.xml.transform.Transformer',
+  'javax.xml.transform.TransformerConfigurationException',
+  'javax.xml.transform.TransformerException',
+  'javax.xml.transform.TransformerFactory',
+  'javax.xml.transform.TransformerFactoryConfigurationError',
+  'javax.xml.transform.URIResolver',
+  'javax.xml.transform.dom.DOMLocator',
+  'javax.xml.transform.dom.DOMResult',
+  'javax.xml.transform.dom.DOMSource',
+  'javax.xml.transform.sax.SAXResult',
+  'javax.xml.transform.sax.SAXSource',
+  'javax.xml.transform.sax.SAXTransformerFactory',
+  'javax.xml.transform.sax.TemplatesHandler',
+  'javax.xml.transform.sax.TransformerHandler',
+  'javax.xml.transform.stax.StAXResult',
+  'javax.xml.transform.stax.StAXSource',
+  'javax.xml.transform.stream.StreamResult',
+  'javax.xml.transform.stream.StreamSource',
+  'javax.xml.validation.Schema',
+  'javax.xml.validation.SchemaFactory',
+  'javax.xml.validation.SchemaFactoryFinder$1',
+  'javax.xml.validation.SchemaFactoryFinder$2',
+  'javax.xml.validation.SchemaFactoryFinder',
+  'javax.xml.validation.SchemaFactoryLoader',
+  'javax.xml.validation.SecuritySupport$1',
+  'javax.xml.validation.SecuritySupport$2',
+  'javax.xml.validation.SecuritySupport$3',
+  'javax.xml.validation.SecuritySupport$4',
+  'javax.xml.validation.SecuritySupport$5',
+  'javax.xml.validation.SecuritySupport$6',
+  'javax.xml.validation.SecuritySupport$7',
+  'javax.xml.validation.SecuritySupport$8',
+  'javax.xml.validation.SecuritySupport',
+  'javax.xml.validation.TypeInfoProvider',
+  'javax.xml.validation.Validator',
+  'javax.xml.validation.ValidatorHandler',
+  'javax.xml.xpath.SecuritySupport$1',
+  'javax.xml.xpath.SecuritySupport$2',
+  'javax.xml.xpath.SecuritySupport$3',
+  'javax.xml.xpath.SecuritySupport$4',
+  'javax.xml.xpath.SecuritySupport$5',
+  'javax.xml.xpath.SecuritySupport$6',
+  'javax.xml.xpath.SecuritySupport$7',
+  'javax.xml.xpath.SecuritySupport$8',
+  'javax.xml.xpath.SecuritySupport',
+  'javax.xml.xpath.XPath',
+  'javax.xml.xpath.XPathConstants',
+  'javax.xml.xpath.XPathException',
+  'javax.xml.xpath.XPathExpression',
+  'javax.xml.xpath.XPathExpressionException',
+  'javax.xml.xpath.XPathFactory',
+  'javax.xml.xpath.XPathFactoryConfigurationException',
+  'javax.xml.xpath.XPathFactoryFinder$1',
+  'javax.xml.xpath.XPathFactoryFinder$2',
+  'javax.xml.xpath.XPathFactoryFinder',
+  'javax.xml.xpath.XPathFunction',
+  'javax.xml.xpath.XPathFunctionException',
+  'javax.xml.xpath.XPathFunctionResolver',
+  'javax.xml.xpath.XPathVariableResolver',
+  'org.w3c.dom.Attr',
+  'org.w3c.dom.CDATASection',
+  'org.w3c.dom.CharacterData',
+  'org.w3c.dom.Comment',
+  'org.w3c.dom.DOMConfiguration',
+  'org.w3c.dom.DOMError',
+  'org.w3c.dom.DOMErrorHandler',
+  'org.w3c.dom.DOMException',
+  'org.w3c.dom.DOMImplementation',
+  'org.w3c.dom.DOMImplementationList',
+  'org.w3c.dom.DOMImplementationSource',
+  'org.w3c.dom.DOMLocator',
+  'org.w3c.dom.DOMStringList',
+  'org.w3c.dom.Document',
+  'org.w3c.dom.DocumentFragment',
+  'org.w3c.dom.DocumentType',
+  'org.w3c.dom.Element',
+  'org.w3c.dom.Entity',
+  'org.w3c.dom.EntityReference',
+  'org.w3c.dom.NameList',
+  'org.w3c.dom.NamedNodeMap',
+  'org.w3c.dom.Node',
+  'org.w3c.dom.NodeList',
+  'org.w3c.dom.Notation',
+  'org.w3c.dom.ProcessingInstruction',
+  'org.w3c.dom.Text',
+  'org.w3c.dom.TypeInfo',
+  'org.w3c.dom.UserDataHandler',
+  'org.w3c.dom.bootstrap.DOMImplementationRegistry$1',
+  'org.w3c.dom.bootstrap.DOMImplementationRegistry$2',
+  'org.w3c.dom.bootstrap.DOMImplementationRegistry$3',
+  'org.w3c.dom.bootstrap.DOMImplementationRegistry$4',
+  'org.w3c.dom.bootstrap.DOMImplementationRegistry',
+  'org.w3c.dom.css.CSS2Properties',
+  'org.w3c.dom.css.CSSCharsetRule',
+  'org.w3c.dom.css.CSSFontFaceRule',
+  'org.w3c.dom.css.CSSImportRule',
+  'org.w3c.dom.css.CSSMediaRule',
+  'org.w3c.dom.css.CSSPageRule',
+  'org.w3c.dom.css.CSSPrimitiveValue',
+  'org.w3c.dom.css.CSSRule',
+  'org.w3c.dom.css.CSSRuleList',
+  'org.w3c.dom.css.CSSStyleDeclaration',
+  'org.w3c.dom.css.CSSStyleRule',
+  'org.w3c.dom.css.CSSStyleSheet',
+  'org.w3c.dom.css.CSSUnknownRule',
+  'org.w3c.dom.css.CSSValue',
+  'org.w3c.dom.css.CSSValueList',
+  'org.w3c.dom.css.Counter',
+  'org.w3c.dom.css.DOMImplementationCSS',
+  'org.w3c.dom.css.DocumentCSS',
+  'org.w3c.dom.css.ElementCSSInlineStyle',
+  'org.w3c.dom.css.RGBColor',
+  'org.w3c.dom.css.Rect',
+  'org.w3c.dom.css.ViewCSS',
+  'org.w3c.dom.events.DocumentEvent',
+  'org.w3c.dom.events.Event',
+  'org.w3c.dom.events.EventException',
+  'org.w3c.dom.events.EventListener',
+  'org.w3c.dom.events.EventTarget',
+  'org.w3c.dom.events.MouseEvent',
+  'org.w3c.dom.events.MutationEvent',
+  'org.w3c.dom.events.UIEvent',
+  'org.w3c.dom.html.HTMLAnchorElement',
+  'org.w3c.dom.html.HTMLAppletElement',
+  'org.w3c.dom.html.HTMLAreaElement',
+  'org.w3c.dom.html.HTMLBRElement',
+  'org.w3c.dom.html.HTMLBaseElement',
+  'org.w3c.dom.html.HTMLBaseFontElement',
+  'org.w3c.dom.html.HTMLBodyElement',
+  'org.w3c.dom.html.HTMLButtonElement',
+  'org.w3c.dom.html.HTMLCollection',
+  'org.w3c.dom.html.HTMLDListElement',
+  'org.w3c.dom.html.HTMLDOMImplementation',
+  'org.w3c.dom.html.HTMLDirectoryElement',
+  'org.w3c.dom.html.HTMLDivElement',
+  'org.w3c.dom.html.HTMLDocument',
+  'org.w3c.dom.html.HTMLElement',
+  'org.w3c.dom.html.HTMLFieldSetElement',
+  'org.w3c.dom.html.HTMLFontElement',
+  'org.w3c.dom.html.HTMLFormElement',
+  'org.w3c.dom.html.HTMLFrameElement',
+  'org.w3c.dom.html.HTMLFrameSetElement',
+  'org.w3c.dom.html.HTMLHRElement',
+  'org.w3c.dom.html.HTMLHeadElement',
+  'org.w3c.dom.html.HTMLHeadingElement',
+  'org.w3c.dom.html.HTMLHtmlElement',
+  'org.w3c.dom.html.HTMLIFrameElement',
+  'org.w3c.dom.html.HTMLImageElement',
+  'org.w3c.dom.html.HTMLInputElement',
+  'org.w3c.dom.html.HTMLIsIndexElement',
+  'org.w3c.dom.html.HTMLLIElement',
+  'org.w3c.dom.html.HTMLLabelElement',
+  'org.w3c.dom.html.HTMLLegendElement',
+  'org.w3c.dom.html.HTMLLinkElement',
+  'org.w3c.dom.html.HTMLMapElement',
+  'org.w3c.dom.html.HTMLMenuElement',
+  'org.w3c.dom.html.HTMLMetaElement',
+  'org.w3c.dom.html.HTMLModElement',
+  'org.w3c.dom.html.HTMLOListElement',
+  'org.w3c.dom.html.HTMLObjectElement',
+  'org.w3c.dom.html.HTMLOptGroupElement',
+  'org.w3c.dom.html.HTMLOptionElement',
+  'org.w3c.dom.html.HTMLParagraphElement',
+  'org.w3c.dom.html.HTMLParamElement',
+  'org.w3c.dom.html.HTMLPreElement',
+  'org.w3c.dom.html.HTMLQuoteElement',
+  'org.w3c.dom.html.HTMLScriptElement',
+  'org.w3c.dom.html.HTMLSelectElement',
+  'org.w3c.dom.html.HTMLStyleElement',
+  'org.w3c.dom.html.HTMLTableCaptionElement',
+  'org.w3c.dom.html.HTMLTableCellElement',
+  'org.w3c.dom.html.HTMLTableColElement',
+  'org.w3c.dom.html.HTMLTableElement',
+  'org.w3c.dom.html.HTMLTableRowElement',
+  'org.w3c.dom.html.HTMLTableSectionElement',
+  'org.w3c.dom.html.HTMLTextAreaElement',
+  'org.w3c.dom.html.HTMLTitleElement',
+  'org.w3c.dom.html.HTMLUListElement',
+  'org.w3c.dom.ls.DOMImplementationLS',
+  'org.w3c.dom.ls.LSException',
+  'org.w3c.dom.ls.LSInput',
+  'org.w3c.dom.ls.LSLoadEvent',
+  'org.w3c.dom.ls.LSOutput',
+  'org.w3c.dom.ls.LSParser',
+  'org.w3c.dom.ls.LSParserFilter',
+  'org.w3c.dom.ls.LSProgressEvent',
+  'org.w3c.dom.ls.LSResourceResolver',
+  'org.w3c.dom.ls.LSSerializer',
+  'org.w3c.dom.ls.LSSerializerFilter',
+  'org.w3c.dom.ranges.DocumentRange',
+  'org.w3c.dom.ranges.Range',
+  'org.w3c.dom.ranges.RangeException',
+  'org.w3c.dom.stylesheets.DocumentStyle',
+  'org.w3c.dom.stylesheets.LinkStyle',
+  'org.w3c.dom.stylesheets.MediaList',
+  'org.w3c.dom.stylesheets.StyleSheet',
+  'org.w3c.dom.stylesheets.StyleSheetList',
+  'org.w3c.dom.traversal.DocumentTraversal',
+  'org.w3c.dom.traversal.NodeFilter',
+  'org.w3c.dom.traversal.NodeIterator',
+  'org.w3c.dom.traversal.TreeWalker',
+  'org.w3c.dom.views.AbstractView',
+  'org.w3c.dom.views.DocumentView',
+  'org.w3c.dom.xpath.XPathEvaluator',
+  'org.w3c.dom.xpath.XPathException',
+  'org.w3c.dom.xpath.XPathExpression',
+  'org.w3c.dom.xpath.XPathNSResolver',
+  'org.w3c.dom.xpath.XPathNamespace',
+  'org.w3c.dom.xpath.XPathResult',
+  'org.xml.sax.AttributeList',
+  'org.xml.sax.Attributes',
+  'org.xml.sax.ContentHandler',
+  'org.xml.sax.DTDHandler',
+  'org.xml.sax.DocumentHandler',
+  'org.xml.sax.EntityResolver',
+  'org.xml.sax.ErrorHandler',
+  'org.xml.sax.HandlerBase',
+  'org.xml.sax.InputSource',
+  'org.xml.sax.Locator',
+  'org.xml.sax.Parser',
+  'org.xml.sax.SAXException',
+  'org.xml.sax.SAXNotRecognizedException',
+  'org.xml.sax.SAXNotSupportedException',
+  'org.xml.sax.SAXParseException',
+  'org.xml.sax.XMLFilter',
+  'org.xml.sax.XMLReader',
+  'org.xml.sax.ext.Attributes2',
+  'org.xml.sax.ext.Attributes2Impl',
+  'org.xml.sax.ext.DeclHandler',
+  'org.xml.sax.ext.DefaultHandler2',
+  'org.xml.sax.ext.EntityResolver2',
+  'org.xml.sax.ext.LexicalHandler',
+  'org.xml.sax.ext.Locator2',
+  'org.xml.sax.ext.Locator2Impl',
+  'org.xml.sax.helpers.AttributeListImpl',
+  'org.xml.sax.helpers.AttributesImpl',
+  'org.xml.sax.helpers.DefaultHandler',
+  'org.xml.sax.helpers.LocatorImpl',
+  'org.xml.sax.helpers.NamespaceSupport$Context',
+  'org.xml.sax.helpers.NamespaceSupport',
+  'org.xml.sax.helpers.NewInstance',
+  'org.xml.sax.helpers.ParserAdapter$AttributeListAdapter',
+  'org.xml.sax.helpers.ParserAdapter',
+  'org.xml.sax.helpers.ParserFactory',
+  'org.xml.sax.helpers.SecuritySupport$1',
+  'org.xml.sax.helpers.SecuritySupport$2',
+  'org.xml.sax.helpers.SecuritySupport$3',
+  'org.xml.sax.helpers.SecuritySupport$4',
+  'org.xml.sax.helpers.SecuritySupport',
+  'org.xml.sax.helpers.XMLFilterImpl',
+  'org.xml.sax.helpers.XMLReaderAdapter$AttributesAdapter',
+  'org.xml.sax.helpers.XMLReaderAdapter',
+  'org.xml.sax.helpers.XMLReaderFactory',
+]
diff --git a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
index 1930f530671..c4f109cc782 100644
--- a/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
+++ b/plugins/lang-python/src/main/java/org/elasticsearch/script/python/PythonScriptEngineService.java
@@ -60,7 +60,7 @@ import org.python.util.PythonInterpreter;
 public class PythonScriptEngineService extends AbstractComponent implements ScriptEngineService {
 
     private final PythonInterpreter interp;
-    
+
     @Inject
     public PythonScriptEngineService(Settings settings) {
         super(settings);
@@ -110,7 +110,7 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri
     }
 
     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, String> params) {
         // classloader created here
         SecurityManager sm = System.getSecurityManager();
         if (sm != null) {
@@ -293,7 +293,7 @@ public class PythonScriptEngineService extends AbstractComponent implements Scri
         if (value == null) {
             return null;
         } else if (value instanceof PyObject) {
-            // seems like this is enough, inner PyDictionary will do the conversion for us for example, so expose it directly
+            // seems like this is enough, inner PyDictionary will do the conversion for us for example, so expose it directly
             return ((PyObject) value).__tojava__(Object.class);
         }
         return value;
diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java
index e713bd67c92..a0bfab43c54 100644
--- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java
+++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptEngineTests.java
@@ -29,6 +29,7 @@ import org.junit.After;
 import org.junit.Before;
 
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -53,7 +54,7 @@ public class PythonScriptEngineTests extends ESTestCase {
 
     public void testSimpleEquation() {
         Map<String, Object> vars = new HashMap<String, Object>();
-        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile("1 + 2")), vars).run();
+        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testSimpleEquation", "python", se.compile("1 + 2", Collections.emptyMap())), vars).run();
         assertThat(((Number) o).intValue(), equalTo(3));
     }
 
@@ -63,13 +64,13 @@ public class PythonScriptEngineTests extends ESTestCase {
         Map<String, Object> obj2 = MapBuilder.<String, Object>newMapBuilder().put("prop2", "value2").map();
         Map<String, Object> obj1 = MapBuilder.<String, Object>newMapBuilder().put("prop1", "value1").put("obj2", obj2).put("l", Arrays.asList("2", "1")).map();
         vars.put("obj1", obj1);
-        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1")), vars).run();
+        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1", Collections.emptyMap())), vars).run();
         assertThat(o, instanceOf(Map.class));
         obj1 = (Map<String, Object>) o;
         assertThat((String) obj1.get("prop1"), equalTo("value1"));
         assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2"));
 
-        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1['l'][0]")), vars).run();
+        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testMapAccess", "python", se.compile("obj1['l'][0]", Collections.emptyMap())), vars).run();
         assertThat(((String) o), equalTo("2"));
     }
 
@@ -82,7 +83,7 @@ public class PythonScriptEngineTests extends ESTestCase {
         vars.put("ctx", ctx);
 
         ExecutableScript executable = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testObjectInterMap", "python",
-                se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'")), vars);
+                se.compile("ctx['obj2'] = { 'prop2' : 'value2' }; ctx['obj1']['prop1'] = 'uvalue1'", Collections.emptyMap())), vars);
         executable.run();
         ctx = (Map<String, Object>) executable.unwrap(vars.get("ctx"));
         assertThat(ctx.containsKey("obj1"), equalTo(true));
@@ -100,15 +101,15 @@ public class PythonScriptEngineTests extends ESTestCase {
 //        Object o = se.execute(se.compile("l.length"), vars);
 //        assertThat(((Number) o).intValue(), equalTo(4));
 
-        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[0]")), vars).run();
+        Object o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[0]", Collections.emptyMap())), vars).run();
         assertThat(((String) o), equalTo("1"));
 
-        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]")), vars).run();
+        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]", Collections.emptyMap())), vars).run();
         obj1 = (Map<String, Object>) o;
         assertThat((String) obj1.get("prop1"), equalTo("value1"));
         assertThat((String) ((Map) obj1.get("obj2")).get("prop2"), equalTo("value2"));
 
-        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]['prop1']")), vars).run();
+        o = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testAccessListInScript", "python", se.compile("l[3]['prop1']", Collections.emptyMap())), vars).run();
         assertThat(((String) o), equalTo("value1"));
     }
 
@@ -116,7 +117,7 @@ public class PythonScriptEngineTests extends ESTestCase {
         Map<String, Object> vars = new HashMap<String, Object>();
         Map<String, Object> ctx = new HashMap<String, Object>();
         vars.put("ctx", ctx);
-        Object compiledScript = se.compile("ctx['value']");
+        Object compiledScript = se.compile("ctx['value']", Collections.emptyMap());
         ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution1", "python", compiledScript), vars);
 
         ctx.put("value", 1);
@@ -131,7 +132,7 @@ public class PythonScriptEngineTests extends ESTestCase {
     public void testChangingVarsCrossExecution2() {
         Map<String, Object> vars = new HashMap<String, Object>();
         Map<String, Object> ctx = new HashMap<String, Object>();
-        Object compiledScript = se.compile("value");
+        Object compiledScript = se.compile("value", Collections.emptyMap());
         ExecutableScript script = se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "testChangingVarsCrossExecution2", "python", compiledScript), vars);
 
         script.setNextVar("value", 1);
diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java
index 7b9663f6b6a..06d3da03ab8 100644
--- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java
+++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonScriptMultiThreadedTests.java
@@ -25,6 +25,7 @@ import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.test.ESTestCase;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
@@ -41,7 +42,7 @@ public class PythonScriptMultiThreadedTests extends ESTestCase {
 
     public void testExecutableNoRuntimeParams() throws Exception {
         final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
-        final Object compiled = se.compile("x + y");
+        final Object compiled = se.compile("x + y", Collections.emptyMap());
         final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecutableNoRuntimeParams", "python", compiled);
         final AtomicBoolean failed = new AtomicBoolean();
 
@@ -127,7 +128,7 @@ public class PythonScriptMultiThreadedTests extends ESTestCase {
 
     public void testExecute() throws Exception {
         final PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
-        final Object compiled = se.compile("x + y");
+        final Object compiled = se.compile("x + y", Collections.emptyMap());
         final CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "testExecute", "python", compiled);
         final AtomicBoolean failed = new AtomicBoolean();
 
diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java
index e90ac503f13..22471129e82 100644
--- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java
+++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/PythonSecurityTests.java
@@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase;
 import org.python.core.PyException;
 
 import java.text.DecimalFormatSymbols;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Locale;
 import java.util.Map;
@@ -34,7 +35,7 @@ import java.util.Map;
 /**
  * Tests for Python security permissions
  */
 public class PythonSecurityTests extends ESTestCase {
-    
+
     private PythonScriptEngineService se;
 
     @Override
@@ -54,14 +55,14 @@ public class PythonSecurityTests extends ESTestCase {
     /** runs a script */
     private void doTest(String script) {
         Map<String, Object> vars = new HashMap<String, Object>();
-        se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "python", se.compile(script)), vars).run();
+        se.executable(new CompiledScript(ScriptService.ScriptType.INLINE, "test", "python", se.compile(script, Collections.emptyMap())), vars).run();
     }
-    
+
     /** asserts that a script runs without exception */
    private void assertSuccess(String script) {
         doTest(script);
     }
-    
+
     /** assert that a security exception is hit */
     private void assertFailure(String script) {
         try {
@@ -76,13 +77,13 @@ public class PythonSecurityTests extends ESTestCase {
         }
     }
-    
+
     /** Test some py scripts that are ok */
     public void testOK() {
         assertSuccess("1 + 2");
         assertSuccess("from java.lang import Math\nMath.cos(0)");
     }
-    
+
     /** Test some py scripts that should hit security exception */
     public void testNotOK() {
         // sanity check :)
@@ -93,7 +94,7 @@ public class PythonSecurityTests extends ESTestCase {
         // no files
         assertFailure("from java.io import File\nFile.createTempFile(\"test\", \"tmp\")");
     }
-    
+
     /** Test again from a new thread, python has complex threadlocal configuration */
     public void testNotOKFromSeparateThread() throws Exception {
         Thread t = new Thread() {
diff --git a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java
index 60e792c34b5..d9559aef16c 100644
--- a/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java
+++ b/plugins/lang-python/src/test/java/org/elasticsearch/script/python/SimpleBench.java
@@ -25,6 +25,7 @@ import org.elasticsearch.script.CompiledScript;
 import org.elasticsearch.script.ExecutableScript;
 import org.elasticsearch.script.ScriptService;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -35,7 +36,7 @@ public class SimpleBench {
 
     public static void main(String[] args) {
         PythonScriptEngineService se = new PythonScriptEngineService(Settings.Builder.EMPTY_SETTINGS);
-        Object compiled = se.compile("x + y");
+        Object compiled = se.compile("x + y", Collections.emptyMap());
         CompiledScript compiledScript = new CompiledScript(ScriptService.ScriptType.INLINE, "SimpleBench", "python", compiled);
 
diff --git a/plugins/mapper-attachments/build.gradle b/plugins/mapper-attachments/build.gradle
index e14cf543043..58f2dceb740 100644
--- a/plugins/mapper-attachments/build.gradle
+++ b/plugins/mapper-attachments/build.gradle
@@ -55,7 +55,6 @@ dependencies {
   compile "org.apache.poi:poi-ooxml-schemas:${versions.poi}"
   compile "commons-codec:commons-codec:${versions.commonscodec}"
   compile 'org.apache.xmlbeans:xmlbeans:2.6.0'
-  compile 'stax:stax-api:1.0.1'
   // MS Office
   compile "org.apache.poi:poi-scratchpad:${versions.poi}"
   // Apple iWork
@@ -69,3 +68,10 @@ forbiddenPatterns {
   exclude '**/*.pdf'
   exclude '**/*.epub'
 }
+
+// classes are missing, e.g. org.openxmlformats.schemas.drawingml.x2006.chart.CTExtensionList
+thirdPartyAudit.missingClasses = true
+thirdPartyAudit.excludes = [
+  // uses internal java api: com.sun.syndication (SyndFeedInput, SyndFeed, SyndEntry, SyndContent)
+  'org.apache.tika.parser.feed.FeedParser',
+]
diff --git a/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1 b/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1
deleted file mode 100644
index 4426e34685d..00000000000
--- a/plugins/mapper-attachments/licenses/stax-api-1.0.1.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-49c100caf72d658aca8e58bd74a4ba90fa2b0d70
\ No newline at end of file
diff --git a/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt b/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt
deleted file mode 100644
index d6456956733..00000000000
--- a/plugins/mapper-attachments/licenses/stax-api-LICENSE.txt
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/plugins/mapper-attachments/licenses/stax-api-NOTICE.txt b/plugins/mapper-attachments/licenses/stax-api-NOTICE.txt
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
index eb0e143c946..ffae8205e33 100644
--- a/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
+++ b/plugins/mapper-attachments/src/main/java/org/elasticsearch/mapper/attachments/AttachmentMapper.java
@@ -37,7 +37,6 @@ import java.util.*;
 
 import static org.elasticsearch.index.mapper.MapperBuilders.*;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseMultiField;
-import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType;
 
 /**
  *
@@ -65,7 +64,6 @@ public class AttachmentMapper extends FieldMapper {
     public static final String CONTENT_TYPE = "attachment";
 
     public static class Defaults {
-        public static final ContentPath.Type PATH_TYPE = ContentPath.Type.FULL;
 
         public static final AttachmentFieldType FIELD_TYPE = new AttachmentFieldType();
         static {
@@ -108,8 +106,6 @@ public class AttachmentMapper extends FieldMapper {
 
     public static class Builder extends FieldMapper.Builder<Builder, AttachmentMapper> {
 
-        private ContentPath.Type pathType = Defaults.PATH_TYPE;
-
         private Boolean ignoreErrors = null;
 
         private Integer defaultIndexedChars = null;
@@ -135,16 +131,11 @@ public class AttachmentMapper extends FieldMapper {
         private Mapper.Builder languageBuilder = stringField(FieldNames.LANGUAGE);
 
         public Builder(String name) {
-            super(name, new AttachmentFieldType());
+            super(name, new AttachmentFieldType(), new AttachmentFieldType());
             this.builder = this;
             this.contentBuilder = stringField(FieldNames.CONTENT);
         }
 
-        public Builder pathType(ContentPath.Type pathType) {
-            this.pathType = pathType;
-            return this;
-        }
-
         public Builder content(Mapper.Builder content) {
             this.contentBuilder = content;
             return this;
@@ -192,8 +183,6 @@ public class AttachmentMapper extends FieldMapper {
 
         @Override
         public AttachmentMapper build(BuilderContext context) {
-            ContentPath.Type origPathType = context.path().pathType();
-            context.path().pathType(pathType);
 
             FieldMapper contentMapper;
             if (context.indexCreatedVersion().before(Version.V_2_0_0_beta1)) {
@@ -220,8 +209,6 @@ public class AttachmentMapper extends FieldMapper {
             FieldMapper language = (FieldMapper) languageBuilder.build(context);
             context.path().remove();
 
-            context.path().pathType(origPathType);
-
             if (defaultIndexedChars == null && context.indexSettings() != null) {
                 defaultIndexedChars = context.indexSettings().getAsInt("index.mapping.attachment.indexed_chars", 100000);
             }
@@ -257,7 +244,7 @@ public class AttachmentMapper extends FieldMapper {
 
             defaultFieldType.freeze();
             this.setupFieldType(context);
-            return new AttachmentMapper(name, fieldType, defaultFieldType, pathType, defaultIndexedChars, ignoreErrors, langDetect, contentMapper,
+            return new AttachmentMapper(name, fieldType, defaultFieldType, defaultIndexedChars, ignoreErrors, langDetect, contentMapper,
                     dateMapper, titleMapper, nameMapper, authorMapper, keywordsMapper, contentTypeMapper, contentLength,
                     language, context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
         }
@@ -309,10 +296,7 @@ public class AttachmentMapper extends FieldMapper {
                Map.Entry<String, Object> entry = iterator.next();
                 String fieldName = entry.getKey();
                 Object fieldNode = entry.getValue();
-                if (fieldName.equals("path") && parserContext.indexVersionCreated().before(Version.V_2_0_0_beta1)) {
-                    builder.pathType(parsePathType(name, fieldNode.toString()));
-                    iterator.remove();
-                } else if (fieldName.equals("fields")) {
+                if (fieldName.equals("fields")) {
                    Map<String, Object> fieldsNode = (Map<String, Object>) fieldNode;
                    for (Iterator<Map.Entry<String, Object>> fieldsIterator = fieldsNode.entrySet().iterator(); fieldsIterator.hasNext();) {
                        Map.Entry<String, Object> entry1 = fieldsIterator.next();
@@ -375,8 +359,6 @@ public class AttachmentMapper extends FieldMapper {
         }
     }
 
-    private final ContentPath.Type pathType;
-
     private final int defaultIndexedChars;
 
     private final boolean ignoreErrors;
@@ -401,13 +383,12 @@ public class AttachmentMapper extends FieldMapper {
 
     private final FieldMapper languageMapper;
 
-    public AttachmentMapper(String simpleName, MappedFieldType type, MappedFieldType defaultFieldType, ContentPath.Type pathType, int defaultIndexedChars, Boolean ignoreErrors,
+    public AttachmentMapper(String simpleName, MappedFieldType type, MappedFieldType defaultFieldType, int defaultIndexedChars, Boolean ignoreErrors,
                             Boolean defaultLangDetect, FieldMapper contentMapper,
                             FieldMapper dateMapper, FieldMapper titleMapper, FieldMapper nameMapper, FieldMapper authorMapper,
                             FieldMapper keywordsMapper, FieldMapper contentTypeMapper, FieldMapper contentLengthMapper,
                             FieldMapper languageMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
         super(simpleName, type, defaultFieldType, indexSettings, multiFields, copyTo);
-        this.pathType = pathType;
         this.defaultIndexedChars = defaultIndexedChars;
         this.ignoreErrors = ignoreErrors;
         this.defaultLangDetect = defaultLangDetect;
@@ -602,7 +583,7 @@ public class AttachmentMapper extends FieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) {
+    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
         // ignore this for now
     }
 
@@ -626,9 +607,6 @@ public class AttachmentMapper extends FieldMapper {
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(simpleName());
         builder.field("type", CONTENT_TYPE);
-        if (indexCreatedBefore2x) {
-            builder.field("path", pathType.name().toLowerCase(Locale.ROOT));
-        }
 
         builder.startObject("fields");
         contentMapper.toXContent(builder, params);
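Alongside the mapper cleanup above, the central API change in this patch is that script engines' compile() now accepts a map of compile-time parameters; Plan A consumes the keys it understands and rejects anything left over. The following is a standalone, hedged sketch of that validate-by-subtraction pattern, not code from the patch: the class name, method name, and the defaultValue fallback are illustrative assumptions (in the patch itself, an empty map simply selects the prebuilt default CompilerSettings).

---------------------------------------------------------------------------
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sketch of the compile-time parameter handling pattern.
public class CompileParamsSketch {

    static boolean parseNumericOverflow(Map<String, String> params, boolean defaultValue) {
        // work on a copy so the caller's map is never mutated
        Map<String, String> clone = new HashMap<>(params);
        String value = clone.remove("numeric_overflow");
        boolean overflow = (value == null) ? defaultValue : Boolean.parseBoolean(value);
        // anything still in the map is a key the engine does not understand
        if (!clone.isEmpty()) {
            throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + clone);
        }
        return overflow;
    }

    public static void main(String[] args) {
        // pinned off, as FloatOverflowDisabledTests does
        System.out.println(parseNumericOverflow(Collections.singletonMap("numeric_overflow", "false"), true));
        // unknown keys fail fast, as WhenThingsGoWrongTests.testBogusParameter() asserts
        try {
            parseNumericOverflow(Collections.singletonMap("bogusParameterKey", "bogusParameterValue"), true);
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}
---------------------------------------------------------------------------

Cloning the incoming map before removing known keys is what makes the leftover check work without side effects, mirroring the clone-and-remove step visible in PlanAScriptEngineService.compile().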
diff --git a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java
index fc17d59603f..f42110c1e62 100644
--- a/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java
+++ b/plugins/mapper-attachments/src/test/java/org/elasticsearch/mapper/attachments/TikaImplTests.java
@@ -1,5 +1,24 @@
 package org.elasticsearch.mapper.attachments;
 
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
 import org.elasticsearch.test.ESTestCase;
 
 public class TikaImplTests extends ESTestCase {
diff --git a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
index 60c31c3f765..03b00d2ac39 100644
--- a/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
+++ b/plugins/mapper-murmur3/src/main/java/org/elasticsearch/index/mapper/murmur3/Murmur3FieldMapper.java
@@ -66,8 +66,7 @@ public class Murmur3FieldMapper extends LongFieldMapper {
             Murmur3FieldMapper fieldMapper = new Murmur3FieldMapper(name, fieldType, defaultFieldType,
                     ignoreMalformed(context), coerce(context),
                     context.indexSettings(), multiFieldsBuilder.build(this, context), copyTo);
-            fieldMapper.includeInAll(includeInAll);
-            return fieldMapper;
+            return (Murmur3FieldMapper) fieldMapper.includeInAll(includeInAll);
         }
 
         @Override
diff --git a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
index aaf46553a75..1e27e18bac7 100644
--- a/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
+++ b/plugins/mapper-size/src/main/java/org/elasticsearch/index/mapper/size/SizeFieldMapper.java
@@ -28,7 +28,6 @@ import org.elasticsearch.index.analysis.NumericIntegerAnalyzer;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MetadataFieldMapper;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.IntegerFieldMapper;
@@ -67,7 +66,7 @@ public class SizeFieldMapper extends MetadataFieldMapper {
         protected EnabledAttributeMapper enabledState = EnabledAttributeMapper.UNSET_DISABLED;
 
         public Builder(MappedFieldType existing) {
-            super(NAME, existing == null ? Defaults.SIZE_FIELD_TYPE : existing);
+            super(NAME, existing == null ? Defaults.SIZE_FIELD_TYPE : existing, Defaults.SIZE_FIELD_TYPE);
             builder = this;
         }
 
@@ -177,12 +176,10 @@ public class SizeFieldMapper extends MetadataFieldMapper {
     }
 
     @Override
-    public void merge(Mapper mergeWith, MergeResult mergeResult) {
+    protected void doMerge(Mapper mergeWith, boolean updateAllTypes) {
         SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith;
-        if (!mergeResult.simulate()) {
-            if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
-                this.enabledState = sizeFieldMapperMergeWith.enabledState;
-            }
+        if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) {
+            this.enabledState = sizeFieldMapperMergeWith.enabledState;
         }
     }
 }
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
new file mode 100644
index 00000000000..ca444768590
--- /dev/null
+++ b/plugins/repository-hdfs/build.gradle
@@ -0,0 +1,206 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+//apply plugin: 'nebula.provided-base'
+ 
+esplugin {
+  description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.'
+  classname 'org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin'
+}
+
+configurations {
+    hadoop1
+    hadoop2
+}
+
+versions << [
+  'hadoop1': '1.2.1',
+  'hadoop2': '2.7.1'
+]
+
+dependencies {
+  provided "org.elasticsearch:elasticsearch:${versions.elasticsearch}"
+  provided "org.apache.hadoop:hadoop-core:${versions.hadoop1}"
+
+  // use Hadoop1 to compile and test things (a subset of Hadoop2)
+  testCompile "org.apache.hadoop:hadoop-core:${versions.hadoop1}"
+  testCompile "org.apache.hadoop:hadoop-test:${versions.hadoop1}"
+  // Hadoop dependencies
+  testCompile "commons-configuration:commons-configuration:1.6"
+  testCompile "commons-lang:commons-lang:${versions.commonslang}"
+  testCompile "commons-collections:commons-collections:3.2.2"
+  testCompile "commons-net:commons-net:1.4.1"
+  testCompile "org.mortbay.jetty:jetty:6.1.26"
+  testCompile "org.mortbay.jetty:jetty-util:6.1.26"
+  testCompile "org.mortbay.jetty:servlet-api:2.5-20081211"
+  testCompile "com.sun.jersey:jersey-core:1.8"
+  
+
+  hadoop1("org.apache.hadoop:hadoop-core:${versions.hadoop1}") {
+    exclude module: "commons-cli"
+    exclude group: "com.sun.jersey"
+    exclude group: "org.mortbay.jetty"
+    exclude group: "tomcat"
+    exclude module: "commons-el"
+    exclude module: "hsqldb"
+    exclude group: "org.eclipse.jdt"
+    exclude module: "commons-beanutils"
+    exclude module: "commons-beanutils-core"
+    exclude module: "junit"
+    // provided by ES itself
+    exclude group: "log4j"
+  }
+
+  hadoop2("org.apache.hadoop:hadoop-client:${versions.hadoop2}") {
+    exclude module: "commons-cli"
+    exclude group: "com.sun.jersey"
+    exclude group: "com.sun.jersey.contribs"
+    exclude group: "com.sun.jersey.jersey-test-framework"
+    exclude module: "guice"
+    exclude group: "org.mortbay.jetty"
+    exclude group: "tomcat"
+    exclude module: "commons-el"
+    exclude module: "hsqldb"
+    exclude group: "org.eclipse.jdt"
+    exclude module: "commons-beanutils"
+    exclude module: "commons-beanutils-core"
+    exclude module: "javax.servlet"
+    exclude module: "junit"
+    // provided by ES itself
+    exclude group: "log4j"
+  }
+
+  hadoop2("org.apache.hadoop:hadoop-hdfs:${versions.hadoop2}") {
+    exclude module: "guava"
+    exclude module: "junit"
+    // provided by ES itself
+    exclude group: "log4j"
+  }
+} 
+
+configurations.all {
+    resolutionStrategy {
+        force "commons-codec:commons-codec:${versions.commonscodec}"
+        force "commons-logging:commons-logging:${versions.commonslogging}"
+        force "commons-lang:commons-lang:2.6"
+        force "commons-httpclient:commons-httpclient:3.0.1"
+        force "org.codehaus.jackson:jackson-core-asl:1.8.8"
+        force "org.codehaus.jackson:jackson-mapper-asl:1.8.8"
+        force "com.google.code.findbugs:jsr305:3.0.0"
+        force "com.google.guava:guava:16.0.1"
+        force "org.slf4j:slf4j-api:1.7.10"
+        force "org.slf4j:slf4j-log4j12:1.7.10"
+    }
+}
+
+
+dependencyLicenses {
+  mapping from: /hadoop-core.*/, to: 'hadoop-1'
+  mapping from: /hadoop-.*/, to: 'hadoop-2'
+}
+
+compileJava.options.compilerArgs << '-Xlint:-deprecation,-rawtypes'
+
+// main jar includes just the plugin classes
+jar {
+    include "org/elasticsearch/plugin/hadoop/hdfs/*"
+}
+
+// hadoop jar (which actually depend on Hadoop)
+task hadoopLinkedJar(type: Jar, dependsOn:jar) {
+    appendix "internal"
+    from sourceSets.main.output.classesDir
+    // exclude plugin
+    exclude "org/elasticsearch/plugin/hadoop/hdfs/*"
+}
+
+
+bundlePlugin.dependsOn hadoopLinkedJar
+
+// configure 'bundle' as being w/o Hadoop deps
+bundlePlugin {
+    into ("internal-libs") {
+        from hadoopLinkedJar.archivePath
+    }
+    
+    into ("hadoop-libs") {
+        from configurations.hadoop2.allArtifacts.files
+        from configurations.hadoop2
+    }
+}
+
+
+task distZipHadoop1(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask ->
+    from (zipTree(bundlePlugin.archivePath)) {
+        include "*"
+        include "internal-libs/**"
+    }
+    
+    description = "Builds archive (with Hadoop1 dependencies) suitable for download page."
+    classifier = "hadoop1"
+
+    into ("hadoop-libs") {
+        from configurations.hadoop1.allArtifacts.files
+        from configurations.hadoop1
+    }
+}
+
+task distZipHadoop2(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask ->
+    from (zipTree(bundlePlugin.archivePath)) {
+        include "*"
+        include "internal-libs/**"
+    }
+        
+    description = "Builds archive (with Hadoop2/YARN dependencies) suitable for download page."
+    classifier = "hadoop2"
+
+    into ("hadoop-libs") {
+        from configurations.hadoop2.allArtifacts.files
+        from configurations.hadoop2
+    }
+}
+
+task distZipNoHadoop(type: Zip, dependsOn: [hadoopLinkedJar, jar]) { zipTask ->
+    from (zipTree(bundlePlugin.archivePath)) {
+        exclude "hadoop-libs/**"
+    }
+    
+    from sourceSets.main.output.resourcesDir
+
+    description = "Builds archive (without any Hadoop dependencies) suitable for download page."
+    classifier = "lite"
+}
+
+
+artifacts {
+    archives bundlePlugin
+    'default' bundlePlugin
+    archives distZipHadoop1
+    archives distZipHadoop2
+    archives distZipNoHadoop
+}
+
+integTest {
+    cluster {
+        plugin(pluginProperties.extension.name, zipTree(distZipHadoop2.archivePath))
+    }
+}
+
+// classes are missing, e.g. org.mockito.Mockito
+thirdPartyAudit.missingClasses = true
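
The three archives above can be built individually. A sketch of the invocations, assuming the plugin is wired in under the ':plugins:repository-hdfs' project path:

---------------------------------------------------------------------------
gradle :plugins:repository-hdfs:distZipHadoop1    # bundle with Hadoop1 jars
gradle :plugins:repository-hdfs:distZipHadoop2    # bundle with Hadoop2/YARN jars
gradle :plugins:repository-hdfs:distZipNoHadoop   # "lite" bundle without Hadoop jars
---------------------------------------------------------------------------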
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java
new file mode 100644
index 00000000000..9b65f7bec2f
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.nio.file.Path;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+
+import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.io.FileSystemUtils;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.repositories.RepositoriesModule;
+import org.elasticsearch.repositories.Repository;
+
+//
+// Note this plugin is somewhat special as Hadoop itself loads a number of libraries and thus requires a number of permissions to run even in client mode.
+// This poses two problems:
+// - Hadoop itself comes with tons of jars, many providing the same classes across packages. In particular Hadoop 2 provides package annotations in the same
+//   package across jars which trips JarHell. Thus, to allow Hadoop jars to load, the plugin uses a dedicated CL which picks them up from the hadoop-libs folder.
+// - The issue though with using a different CL is that it picks up the jars from a different location / codeBase and thus it does not fall under the plugin
+//   permissions. In other words, the plugin permissions don't apply to the hadoop libraries.  
+//   There are different approaches here:
+//      - implement a custom classloader that loads the jars but 'lies' about the codesource. It is doable, but since URLClassLoader is locked down, one
+//        would have to implement the whole jar opening and loading oneself. Not impossible, but still fairly low-level.
+//        Furthermore, even if the code has the proper credentials, it needs to use the proper Privileged blocks to use its full permissions, which does not
+//        happen in the Hadoop code base.
+//      - use a different Policy. Works, but the Policy is JVM-wide and thus the code needs to be quite efficient - a lot of impact just to cover some plugin
+//        libraries
+//      - use a DomainCombiner. This doesn't change the semantics (it's clear where the code is loaded from, etc.); however it gives us a scoped, fine-grained
+//        callback for handling the permission intersection of secured calls. Note that the combiner applies only within the current privileged call - the moment
+//        another PrivilegedAction is used, the domain combiner is ignored (unless the caller specifically passes it on). Due to its scoped impact and official
+//        Java support, this approach was used.
+
+// ClassLoading info
+// - package plugin.hadoop.hdfs is part of the plugin
+// - all the other packages are assumed to be in the nested Hadoop CL.
+
+// Code 
+public class HdfsPlugin extends Plugin {
+
+    @Override
+    public String name() {
+        return "repository-hdfs";
+    }
+
+    @Override
+    public String description() {
+        return "HDFS Repository Plugin";
+    }
+
+    @SuppressWarnings("unchecked")
+    public void onModule(RepositoriesModule repositoriesModule) {
+        String baseLib = Utils.detectLibFolder();
+        List<URL> cp = getHadoopClassLoaderPath(baseLib);
+        
+        ClassLoader hadoopCL = URLClassLoader.newInstance(cp.toArray(new URL[cp.size()]), getClass().getClassLoader());
+
+        Class<? extends Repository> repository = null;
+        try {
+            repository = (Class<? extends Repository>) hadoopCL.loadClass("org.elasticsearch.repositories.hdfs.HdfsRepository");
+        } catch (ClassNotFoundException cnfe) {
+            throw new IllegalStateException("Cannot load plugin class; is the plugin class setup correctly?", cnfe);
+        }
+
+        repositoriesModule.registerRepository("hdfs", repository, BlobStoreIndexShardRepository.class);
+        Loggers.getLogger(HdfsPlugin.class).info("Loaded Hadoop [{}] libraries from {}", getHadoopVersion(hadoopCL), baseLib);
+    }
+
+    protected List<URL> getHadoopClassLoaderPath(String baseLib) {
+        List<URL> cp = new ArrayList<>();
+        // add plugin internal jar
+        discoverJars(createURI(baseLib, "internal-libs"), cp, false);
+        // add Hadoop jars
+        discoverJars(createURI(baseLib, "hadoop-libs"), cp, true);
+        return cp;
+    }
+
+    private String getHadoopVersion(ClassLoader hadoopCL) {
+        SecurityManager sm = System.getSecurityManager();
+        if (sm != null) {
+            // unprivileged code such as scripts do not have SpecialPermission
+            sm.checkPermission(new SpecialPermission());
+        }
+
+        return AccessController.doPrivileged(new PrivilegedAction<String>() {
+            @Override
+            public String run() {
+                // Hadoop 2 relies on TCCL to determine the version
+                ClassLoader tccl = Thread.currentThread().getContextClassLoader();
+                try {
+                    Thread.currentThread().setContextClassLoader(hadoopCL);
+                    return doGetHadoopVersion(hadoopCL);
+                } finally {
+                    Thread.currentThread().setContextClassLoader(tccl);
+                }
+            }
+        }, Utils.hadoopACC());
+    }
+
+    private String doGetHadoopVersion(ClassLoader hadoopCL) {
+        String version = "Unknown";
+
+        Class<?> clz = null;
+        try {
+            clz = hadoopCL.loadClass("org.apache.hadoop.util.VersionInfo");
+        } catch (ClassNotFoundException cnfe) {
+            // unknown
+        }
+        if (clz != null) {
+            try {
+                Method method = clz.getMethod("getVersion");
+                version = method.invoke(null).toString();
+            } catch (Exception ex) {
+                // class has changed, ignore
+            }
+        }
+
+        return version;
+    }
+
+    private URI createURI(String base, String suffix) {
+        String location = base + suffix;
+        try {
+            return new URI(location);
+        } catch (URISyntaxException ex) {
+            throw new IllegalStateException(String.format(Locale.ROOT, "Cannot detect plugin folder; [%s] seems invalid", location), ex);
+        }
+    }
+
+    @SuppressForbidden(reason = "discover nested jar")
+    private void discoverJars(URI libPath, List<URL> cp, boolean optional) {
+        try {
+            Path[] jars = FileSystemUtils.files(PathUtils.get(libPath), "*.jar");
+
+            for (Path path : jars) {
+                cp.add(path.toUri().toURL());
+            }
+        } catch (IOException ex) {
+            if (!optional) {
+                throw new IllegalStateException("Cannot compute plugin classpath", ex);
+            }
+        }
+    }
+}
\ No newline at end of file
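
A minimal, self-contained sketch of the dedicated-classloader approach described in the comments above: jars are discovered in a folder and resolved through a child URLClassLoader, so they keep their own codebase and the plugin's permissions do not silently extend to them. Folder and class names are illustrative, not the plugin's actual layout:

---------------------------------------------------------------------------
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;

public class ChildLoaderSketch {
    public static void main(String[] args) throws Exception {
        List<URL> cp = new ArrayList<>();
        Path libDir = Paths.get("hadoop-libs"); // illustrative location
        try (DirectoryStream<Path> jars = Files.newDirectoryStream(libDir, "*.jar")) {
            for (Path jar : jars) {
                cp.add(jar.toUri().toURL());
            }
        }
        // classes resolved through the child keep the jar's own codebase
        ClassLoader child = URLClassLoader.newInstance(cp.toArray(new URL[cp.size()]),
                ChildLoaderSketch.class.getClassLoader());
        Class<?> impl = child.loadClass("org.example.SomeImpl"); // hypothetical class
        System.out.println(impl + " loaded by " + impl.getClassLoader());
    }
}
---------------------------------------------------------------------------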
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java
new file mode 100644
index 00000000000..cf786179787
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/plugin/hadoop/hdfs/Utils.java
@@ -0,0 +1,103 @@
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.net.URL;
+import java.security.AccessControlContext;
+import java.security.AccessController;
+import java.security.DomainCombiner;
+import java.security.PrivilegedAction;
+import java.security.ProtectionDomain;
+
+import org.elasticsearch.SpecialPermission;
+
+public abstract class Utils {
+
+    protected static AccessControlContext hadoopACC() {
+        SecurityManager sm = System.getSecurityManager();
+        if (sm != null) {
+            // unprivileged code such as scripts do not have SpecialPermission
+            sm.checkPermission(new SpecialPermission());
+        }
+
+        return AccessController.doPrivileged(new PrivilegedAction<AccessControlContext>() {
+            @Override
+            public AccessControlContext run() {
+                return new AccessControlContext(AccessController.getContext(), new HadoopDomainCombiner());
+            }
+        });
+    }
+
+    private static class HadoopDomainCombiner implements DomainCombiner {
+
+        private static String BASE_LIB = detectLibFolder();
+
+        @Override
+        public ProtectionDomain[] combine(ProtectionDomain[] currentDomains, ProtectionDomain[] assignedDomains) {
+            for (ProtectionDomain pd : assignedDomains) {
+                if (pd.getCodeSource().getLocation().toString().startsWith(BASE_LIB)) {
+                    return assignedDomains;
+                }
+            }
+
+            return currentDomains;
+        }
+    }
+
+    static String detectLibFolder() {
+        ClassLoader cl = Utils.class.getClassLoader();
+
+        // we could get the URL from the URLClassloader directly
+        // but that can create issues when running the tests from the IDE
+        // we could detect that by loading resources but that as well relies on
+        // the JAR URL
+        String classToLookFor = HdfsPlugin.class.getName().replace(".", "/").concat(".class");
+        URL classURL = cl.getResource(classToLookFor);
+        if (classURL == null) {
+            throw new IllegalStateException("Cannot detect itself; something is wrong with this ClassLoader " + cl);
+        }
+
+        String base = classURL.toString();
+
+        // extract root
+        // typically a JAR URL
+        int index = base.indexOf("!/");
+        if (index > 0) {
+            base = base.substring(0, index);
+            // remove its prefix (jar:)
+            base = base.substring(4);
+            // remove the trailing jar
+            index = base.lastIndexOf("/");
+            base = base.substring(0, index + 1);
+        }
+        // not a jar - something else, do a best effort here
+        else {
+            // remove the class searched
+            base = base.substring(0, base.length() - classToLookFor.length());
+        }
+
+        // append /
+        if (!base.endsWith("/")) {
+            base = base.concat("/");
+        }
+
+        return base;
+    }
+}
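
The string surgery in detectLibFolder is easiest to follow on a concrete input. A standalone sketch of the same steps, using a hypothetical install path:

---------------------------------------------------------------------------
public class LibFolderSketch {
    static String rootOf(String classURL, String classToLookFor) {
        String base = classURL;
        int index = base.indexOf("!/");
        if (index > 0) {
            base = base.substring(0, index);                      // drop the in-jar path
            base = base.substring(4);                             // drop the "jar:" prefix
            base = base.substring(0, base.lastIndexOf("/") + 1);  // drop the jar name
        } else {
            base = base.substring(0, base.length() - classToLookFor.length());
        }
        return base.endsWith("/") ? base : base.concat("/");
    }

    public static void main(String[] args) {
        String cls = "org/elasticsearch/plugin/hadoop/hdfs/HdfsPlugin.class";
        // hypothetical plugin location
        String url = "jar:file:/es/plugins/repository-hdfs/plugin.jar!/" + cls;
        System.out.println(rootOf(url, cls)); // prints: file:/es/plugins/repository-hdfs/
    }
}
---------------------------------------------------------------------------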
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java
new file mode 100644
index 00000000000..5e7c4d3fa57
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FileSystemFactory.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.hdfs;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+
+interface FileSystemFactory {
+
+    FileSystem getFileSystem() throws IOException;
+}
diff --git a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java
similarity index 60%
rename from core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java
rename to plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java
index b537c448bea..3eda2272149 100644
--- a/core/src/main/java/org/elasticsearch/cluster/settings/ClusterDynamicSettings.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/FsCallback.java
@@ -17,22 +17,13 @@
  * under the License.
  */
 
-package org.elasticsearch.cluster.settings;
+package org.elasticsearch.repositories.hdfs;
 
-import org.elasticsearch.common.inject.BindingAnnotation;
+import java.io.IOException;
 
-import java.lang.annotation.Documented;
-import java.lang.annotation.Retention;
-import java.lang.annotation.Target;
+import org.apache.hadoop.fs.FileSystem;
 
-import static java.lang.annotation.ElementType.FIELD;
-import static java.lang.annotation.ElementType.PARAMETER;
-import static java.lang.annotation.RetentionPolicy.RUNTIME;
+interface FsCallback<V> {
 
-
-@BindingAnnotation
-@Target({FIELD, PARAMETER})
-@Retention(RUNTIME)
-@Documented
-public @interface ClusterDynamicSettings {
-}
\ No newline at end of file
+    V doInHdfs(FileSystem fs) throws IOException;
+}
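
FileSystemFactory and FsCallback together form the execute-around template used by the blob store below: every HDFS call is expressed as a callback and routed through a privileged executor. A sketch of the intended usage, assuming the same package and the SecurityUtils.execute helper introduced later in this change:

---------------------------------------------------------------------------
package org.elasticsearch.repositories.hdfs;

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class FsCallbackUsageSketch {
    // an existence check expressed as a callback; execute(...) runs it inside
    // the scoped AccessControlContext set up by the plugin
    static boolean exists(FileSystemFactory ffs, String blob) throws IOException {
        return SecurityUtils.execute(ffs, new FsCallback<Boolean>() {
            @Override
            public Boolean doInHdfs(FileSystem fs) throws IOException {
                return fs.exists(new Path(blob)); // illustrative blob path
            }
        });
    }
}
---------------------------------------------------------------------------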
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
new file mode 100644
index 00000000000..f71ca7020a8
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
@@ -0,0 +1,173 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.hdfs;
+
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
+import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Locale;
+import java.util.Map;
+
+public class HdfsBlobContainer extends AbstractBlobContainer {
+
+    protected final HdfsBlobStore blobStore;
+    protected final Path path;
+
+    public HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore blobStore, Path path) {
+        super(blobPath);
+        this.blobStore = blobStore;
+        this.path = path;
+    }
+
+    @Override
+    public boolean blobExists(String blobName) {
+        try {
+            return SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<Boolean>() {
+                @Override
+                public Boolean doInHdfs(FileSystem fs) throws IOException {
+                    return fs.exists(new Path(path, blobName));
+                }
+            });
+        } catch (Exception e) {
+            return false;
+        }
+    }
+
+    @Override
+    public void deleteBlob(String blobName) throws IOException {
+        SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<Boolean>() {
+            @Override
+            public Boolean doInHdfs(FileSystem fs) throws IOException {
+                return fs.delete(new Path(path, blobName), true);
+            }
+        });
+    }
+
+    @Override
+    public void move(String sourceBlobName, String targetBlobName) throws IOException {
+        boolean rename = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<Boolean>() {
+            @Override
+            public Boolean doInHdfs(FileSystem fs) throws IOException {
+                return fs.rename(new Path(path, sourceBlobName), new Path(path, targetBlobName));
+            }
+        });
+        
+        if (!rename) {
+            throw new IOException(String.format(Locale.ROOT, "can not move blob from [%s] to [%s]", sourceBlobName, targetBlobName));
+        }
+    }
+
+    @Override
+    public InputStream readBlob(String blobName) throws IOException {
+        // FSDataInputStream does buffering internally
+        return SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<InputStream>() {
+            @Override
+            public InputStream doInHdfs(FileSystem fs) throws IOException {
+                return fs.open(new Path(path, blobName), blobStore.bufferSizeInBytes());
+            }
+        });
+    }
+
+    @Override
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+        SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<Void>() {
+            @Override
+            public Void doInHdfs(FileSystem fs) throws IOException {
+                try (OutputStream stream = createOutput(blobName)) {
+                    Streams.copy(inputStream, stream);
+                }
+                return null;
+            }
+        });
+    }
+
+    @Override
+    public void writeBlob(String blobName, BytesReference bytes) throws IOException {
+        SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<Void>() {
+            @Override
+            public Void doInHdfs(FileSystem fs) throws IOException {
+                try (OutputStream stream = createOutput(blobName)) {
+                    bytes.writeTo(stream);
+                }
+                return null;
+            }
+        });
+    }
+    
+    private OutputStream createOutput(String blobName) throws IOException {
+        Path file = new Path(path, blobName);
+        // FSDataOutputStream does buffering internally
+        return blobStore.fileSystemFactory().getFileSystem().create(file, true, blobStore.bufferSizeInBytes());
+    }
+
+    @Override
+    public Map<String, BlobMetaData> listBlobsByPrefix(final @Nullable String blobNamePrefix) throws IOException {
+        FileStatus[] files = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<FileStatus[]>() {
+            @Override
+            public FileStatus[] doInHdfs(FileSystem fs) throws IOException {
+                return fs.listStatus(path, new PathFilter() {
+                    @Override
+                    public boolean accept(Path path) {
+                        return path.getName().startsWith(blobNamePrefix);
+                    }
+                });
+            }
+        });
+        if (files == null || files.length == 0) {
+            return Collections.emptyMap();
+        }
+        Map<String, BlobMetaData> map = new LinkedHashMap<>();
+        for (FileStatus file : files) {
+            map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen()));
+        }
+        return Collections.unmodifiableMap(map);
+    }
+
+    @Override
+    public Map<String, BlobMetaData> listBlobs() throws IOException {
+        FileStatus[] files = SecurityUtils.execute(blobStore.fileSystemFactory(), new FsCallback<FileStatus[]>() {
+            @Override
+            public FileStatus[] doInHdfs(FileSystem fs) throws IOException {
+                return fs.listStatus(path);
+            }
+        });
+        if (files == null || files.length == 0) {
+            return Collections.emptyMap();
+        }
+        Map<String, BlobMetaData> map = new LinkedHashMap<>();
+        for (FileStatus file : files) {
+            map.put(file.getPath().getName(), new PlainBlobMetaData(file.getPath().getName(), file.getLen()));
+        }
+        return Collections.unmodifiableMap(map);
+    }
+}
\ No newline at end of file
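
A sketch of a blob round-trip through the container above, using only methods defined in this file and in HdfsBlobStore (which follows). The payload and path are illustrative, and the BytesArray/Streams helpers are assumed from elasticsearch core:

---------------------------------------------------------------------------
package org.elasticsearch.repositories.hdfs;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;

class BlobRoundTripSketch {
    // writes a tiny payload and reads it back through the container API above
    static String roundTrip(HdfsBlobStore store) throws IOException {
        BlobContainer container = store.blobContainer(BlobPath.cleanPath().add("sketch"));
        container.writeBlob("example", new BytesArray("payload"));
        try (InputStream in = container.readBlob("example")) {
            return Streams.copyToString(new InputStreamReader(in, StandardCharsets.UTF_8));
        }
    }
}
---------------------------------------------------------------------------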
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
new file mode 100644
index 00000000000..b75485fa7fe
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.hdfs;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.io.IOException;
+import java.util.concurrent.Executor;
+
+public class HdfsBlobStore extends AbstractComponent implements BlobStore {
+
+    private final FileSystemFactory ffs;
+    private final Path rootHdfsPath;
+    private final ThreadPool threadPool;
+    private final int bufferSizeInBytes;
+
+    public HdfsBlobStore(Settings settings, FileSystemFactory ffs, Path path, ThreadPool threadPool) throws IOException {
+        super(settings);
+        this.ffs = ffs;
+        this.rootHdfsPath = path;
+        this.threadPool = threadPool;
+
+        this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
+
+        mkdirs(path);
+    }
+
+    private void mkdirs(Path path) throws IOException {
+        SecurityUtils.execute(ffs, new FsCallback<Void>() {
+            @Override
+            public Void doInHdfs(FileSystem fs) throws IOException {
+                if (!fs.exists(path)) {
+                    fs.mkdirs(path);
+                }
+                return null;
+            }
+        });
+    }
+
+    @Override
+    public String toString() {
+        return rootHdfsPath.toUri().toString();
+    }
+
+    public FileSystemFactory fileSystemFactory() {
+        return ffs;
+    }
+
+    public Path path() {
+        return rootHdfsPath;
+    }
+
+    public Executor executor() {
+        return threadPool.executor(ThreadPool.Names.SNAPSHOT);
+    }
+
+    public int bufferSizeInBytes() {
+        return bufferSizeInBytes;
+    }
+
+    @Override
+    public BlobContainer blobContainer(BlobPath path) {
+        return new HdfsBlobContainer(path, this, buildHdfsPath(path));
+    }
+
+    @Override
+    public void delete(BlobPath path) throws IOException {
+        SecurityUtils.execute(ffs, new FsCallback<Void>() {
+            @Override
+            public Void doInHdfs(FileSystem fs) throws IOException {
+                fs.delete(translateToHdfsPath(path), true);
+                return null;
+            }
+        });
+    }
+
+    private Path buildHdfsPath(BlobPath blobPath) {
+        final Path path = translateToHdfsPath(blobPath);
+        try {
+            mkdirs(path);
+        } catch (IOException ex) {
+            throw new ElasticsearchException("failed to create blob container", ex);
+        }
+        return path;
+    }
+
+    private Path translateToHdfsPath(BlobPath blobPath) {
+        Path path = path();
+        for (String p : blobPath) {
+            path = new Path(path, p);
+        }
+        return path;
+    }
+
+    @Override
+    public void close() {
+        //
+    }
+}
\ No newline at end of file
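
How translateToHdfsPath expands a BlobPath under the repository root, on a concrete (illustrative) example:

---------------------------------------------------------------------------
import org.apache.hadoop.fs.Path;

public class PathTranslationSketch {
    public static void main(String[] args) {
        Path path = new Path("/es/repo"); // hypothetical repository root
        for (String p : new String[] { "indices", "0" }) {
            path = new Path(path, p); // the same walk translateToHdfsPath performs
        }
        System.out.println(path); // /es/repo/indices/0
    }
}
---------------------------------------------------------------------------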
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
new file mode 100644
index 00000000000..11081445fd4
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
@@ -0,0 +1,259 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.repositories.hdfs;
+
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URL;
+import java.nio.file.Files;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Map.Entry;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.ElasticsearchGenerationException;
+import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.SuppressForbidden;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.snapshots.IndexShardRepository;
+import org.elasticsearch.repositories.RepositoryName;
+import org.elasticsearch.repositories.RepositorySettings;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.threadpool.ThreadPool;
+
+public class HdfsRepository extends BlobStoreRepository implements FileSystemFactory {
+
+    public final static String TYPE = "hdfs";
+
+    private final HdfsBlobStore blobStore;
+    private final BlobPath basePath;
+    private final ByteSizeValue chunkSize;
+    private final boolean compress;
+    private final RepositorySettings repositorySettings;
+    private FileSystem fs;
+
+    @Inject
+    public HdfsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, ThreadPool threadPool) throws IOException {
+        super(name.getName(), repositorySettings, indexShardRepository);
+
+        this.repositorySettings = repositorySettings;
+
+        String path = repositorySettings.settings().get("path", settings.get("path"));
+        if (path == null) {
+            throw new IllegalArgumentException("no 'path' defined for hdfs snapshot/restore");
+        }
+
+        // get configuration
+        fs = getFileSystem();
+        Path hdfsPath = SecurityUtils.execute(fs, new FsCallback<Path>() {
+            @Override
+            public Path doInHdfs(FileSystem fs) throws IOException {
+                return fs.makeQualified(new Path(path));
+            }
+        });
+        this.basePath = BlobPath.cleanPath();
+
+        logger.debug("Using file-system [{}] for URI [{}], path [{}]", fs, fs.getUri(), hdfsPath);
+        blobStore = new HdfsBlobStore(settings, this, hdfsPath, threadPool);
+        this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", settings.getAsBytesSize("chunk_size", null));
+        this.compress = repositorySettings.settings().getAsBoolean("compress", settings.getAsBoolean("compress", false));
+    }
+
+    // as the FileSystem is long-lived and might go away, make sure to check it before it is used
+    @Override
+    public FileSystem getFileSystem() throws IOException {
+        SecurityManager sm = System.getSecurityManager();
+        if (sm != null) {
+            // unprivileged code such as scripts do not have SpecialPermission
+            sm.checkPermission(new SpecialPermission());
+        }
+
+        try {
+            return AccessController.doPrivileged(new PrivilegedExceptionAction<FileSystem>() {
+                @Override
+                public FileSystem run() throws IOException {
+                    return doGetFileSystem();
+                }
+            }, SecurityUtils.AccBridge.acc());
+        } catch (PrivilegedActionException pae) {
+            Throwable th = pae.getCause();
+            if (th instanceof Error) {
+                throw (Error) th;
+            }
+            if (th instanceof RuntimeException) {
+                throw (RuntimeException) th;
+            }
+            if (th instanceof IOException) {
+                throw (IOException) th;
+            }
+            throw new ElasticsearchException(pae);
+        }
+    }
+
+    private FileSystem doGetFileSystem() throws IOException {
+        // check if the fs is still alive
+        // make a cheap call that triggers little to no security checks
+        if (fs != null) {
+            try {
+                fs.isFile(fs.getWorkingDirectory());
+            } catch (IOException ex) {
+                if (ex.getMessage().contains("Filesystem closed")) {
+                    fs = null;
+                }
+                else {
+                    throw ex;
+                }
+            }
+        }
+        if (fs == null) {
+            Thread th = Thread.currentThread();
+            ClassLoader oldCL = th.getContextClassLoader();
+            try {
+                th.setContextClassLoader(getClass().getClassLoader());
+                return initFileSystem(repositorySettings);
+            } catch (IOException ex) {
+                throw ex;
+            } finally {
+                th.setContextClassLoader(oldCL);
+            }
+        }
+        return fs;
+    }
+
+    private FileSystem initFileSystem(RepositorySettings repositorySettings) throws IOException {
+
+        Configuration cfg = new Configuration(repositorySettings.settings().getAsBoolean("load_defaults", settings.getAsBoolean("load_defaults", true)));
+        cfg.setClassLoader(this.getClass().getClassLoader());
+        cfg.reloadConfiguration();
+
+        String confLocation = repositorySettings.settings().get("conf_location", settings.get("conf_location"));
+        if (Strings.hasText(confLocation)) {
+            for (String entry : Strings.commaDelimitedListToStringArray(confLocation)) {
+                addConfigLocation(cfg, entry.trim());
+            }
+        }
+
+        Map<String, String> map = repositorySettings.settings().getByPrefix("conf.").getAsMap();
+        for (Entry<String, String> entry : map.entrySet()) {
+            cfg.set(entry.getKey(), entry.getValue());
+        }
+
+        try {
+            UserGroupInformation.setConfiguration(cfg);
+        } catch (Throwable th) {
+            throw new ElasticsearchGenerationException(String.format(Locale.ROOT, "Cannot initialize Hadoop"), th);
+        }
+
+        String uri = repositorySettings.settings().get("uri", settings.get("uri"));
+        URI actualUri = (uri != null ? URI.create(uri) : FileSystem.getDefaultUri(cfg));
+        String user = repositorySettings.settings().get("user", settings.get("user"));
+
+        try {
+            // disable FS cache
+            String disableFsCache = String.format(Locale.ROOT, "fs.%s.impl.disable.cache", actualUri.getScheme());
+            cfg.setBoolean(disableFsCache, true);
+
+            return (user != null ? FileSystem.get(actualUri, cfg, user) : FileSystem.get(actualUri, cfg));
+        } catch (Exception ex) {
+            throw new ElasticsearchGenerationException(String.format(Locale.ROOT, "Cannot create Hdfs file-system for uri [%s]", actualUri), ex);
+        }
+    }
+
+    @SuppressForbidden(reason = "pick up Hadoop config (which can be on HDFS)")
+    private void addConfigLocation(Configuration cfg, String confLocation) {
+        URL cfgURL = null;
+        // no scheme - try the location as a classpath resource first, then as a file
+        if (!confLocation.contains(":")) {
+            cfgURL = cfg.getClassLoader().getResource(confLocation);
+
+            // fall back to file
+            if (cfgURL == null) {
+                java.nio.file.Path path = PathUtils.get(confLocation);
+                if (!Files.isReadable(path)) {
+                    throw new IllegalArgumentException(
+                            String.format(Locale.ROOT,
+                                    "Cannot find classpath resource or file 'conf_location' [%s] defined for hdfs snapshot/restore",
+                                    confLocation));
+                }
+                String pathLocation = path.toUri().toString();
+                logger.debug("Adding path [{}] as file [{}]", confLocation, pathLocation);
+                confLocation = pathLocation;
+            }
+            else {
+                logger.debug("Resolving path [{}] to classpath [{}]", confLocation, cfgURL);
+            }
+        }
+        else {
+            logger.debug("Adding path [{}] as URL", confLocation);
+        }
+
+        if (cfgURL == null) {
+            try {
+                cfgURL = new URL(confLocation);
+            } catch (MalformedURLException ex) {
+                throw new IllegalArgumentException(String.format(Locale.ROOT,
+                        "Invalid 'conf_location' URL [%s] defined for hdfs snapshot/restore", confLocation), ex);
+            }
+        }
+
+        cfg.addResource(cfgURL);
+    }
+
+    @Override
+    protected BlobStore blobStore() {
+        return blobStore;
+    }
+
+    @Override
+    protected BlobPath basePath() {
+        return basePath;
+    }
+
+    @Override
+    protected boolean isCompress() {
+        return compress;
+    }
+
+    @Override
+    protected ByteSizeValue chunkSize() {
+        return chunkSize;
+    }
+
+    @Override
+    protected void doClose() throws ElasticsearchException {
+        super.doClose();
+
+        IOUtils.closeStream(fs);
+        fs = null;
+    }
+}
\ No newline at end of file
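
The constructor and initFileSystem above read their configuration from the repository settings: uri, path, user, conf_location, conf.*, load_defaults, chunk_size and compress (plus buffer_size in the blob store). An illustrative registration request; host, path and values are placeholders:

---------------------------------------------------------------------------
PUT /_snapshot/my_hdfs_repository
{
  "type": "hdfs",
  "settings": {
    "uri": "hdfs://namenode:8020/",
    "path": "elasticsearch/repositories/my_hdfs_repository",
    "conf.dfs.client.read.shortcircuit": "true",
    "chunk_size": "10mb",
    "compress": true
  }
}
---------------------------------------------------------------------------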
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java
new file mode 100644
index 00000000000..6a0d4ffa818
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/SecurityUtils.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.hdfs;
+
+import java.io.IOException;
+import java.security.AccessControlContext;
+import java.security.AccessController;
+import java.security.PrivilegedActionException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.SpecialPermission;
+import org.elasticsearch.plugin.hadoop.hdfs.Utils;
+
+class SecurityUtils {
+
+    abstract static class AccBridge extends Utils {
+        static AccessControlContext acc() {
+            return Utils.hadoopACC();
+        }
+    }
+
+    static <V> V execute(FileSystemFactory ffs, FsCallback<V> callback) throws IOException {
+        return execute(ffs.getFileSystem(), callback);
+    }
+
+    static <V> V execute(FileSystem fs, FsCallback<V> callback) throws IOException {
+        SecurityManager sm = System.getSecurityManager();
+        if (sm != null) {
+            // unprivileged code such as scripts do not have SpecialPermission
+            sm.checkPermission(new SpecialPermission());
+        }
+
+        try {
+            return AccessController.doPrivileged(new PrivilegedExceptionAction<V>() {
+                @Override
+                public V run() throws IOException {
+                    return callback.doInHdfs(fs);
+                }
+            }, AccBridge.acc());
+        } catch (PrivilegedActionException pae) {
+            Throwable th = pae.getCause();
+            if (th instanceof Error) {
+                throw (Error) th;
+            }
+            if (th instanceof RuntimeException) {
+                throw (RuntimeException) th;
+            }
+            if (th instanceof IOException) {
+                throw (IOException) th;
+            }
+            throw new ElasticsearchException(pae);
+        }
+    }
+}
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java
new file mode 100644
index 00000000000..46cb0a263fe
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/TestingFs.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.hdfs;
+
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
+import org.elasticsearch.common.SuppressForbidden;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Extends LFS to improve some operations to keep the security permissions at
+ * bay. In particular mkdir is smarter and doesn't have to walk all the file
+ * hierarchy but rather only limits itself to the parent/working dir and creates
+ * a file only when necessary.
+ */
+public class TestingFs extends LocalFileSystem {
+
+    private static class ImprovedRawLocalFileSystem extends RawLocalFileSystem {
+        @Override
+        @SuppressForbidden(reason = "the Hadoop API depends on java.io.File")
+        public boolean mkdirs(Path f) throws IOException {
+            File wd = pathToFile(getWorkingDirectory());
+            File local = pathToFile(f);
+            if (wd.equals(local) || local.exists()) {
+                return true;
+            }
+            return mkdirs(f.getParent()) && local.mkdir();
+        }
+    }
+
+    public TestingFs() {
+        super(new ImprovedRawLocalFileSystem());
+        // use the build path instead of the starting dir as that one has read permissions
+        //setWorkingDirectory(new Path(getClass().getProtectionDomain().getCodeSource().getLocation().toString()));
+        setWorkingDirectory(new Path(System.getProperty("java.io.tmpdir")));
+    }
+}
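
TestingFs is meant to stand in for a real HDFS in tests through Hadoop's pluggable filesystem configuration. A hedged wiring sketch; the scheme key is illustrative, and the actual test setup may register it differently:

---------------------------------------------------------------------------
import org.apache.hadoop.conf.Configuration;
import org.elasticsearch.repositories.hdfs.TestingFs;

public class TestingFsWiringSketch {
    public static void main(String[] args) {
        Configuration cfg = new Configuration();
        // Hadoop resolves "fs.<scheme>.impl" to pick the FileSystem class
        cfg.set("fs.file.impl", TestingFs.class.getName());
        // avoid getting handed a cached instance of the default local FS
        cfg.setBoolean("fs.file.impl.disable.cache", true);
    }
}
---------------------------------------------------------------------------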
diff --git a/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy
new file mode 100644
index 00000000000..d26acd121e4
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/plugin-metadata/plugin-security.policy
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+grant {
+  // used by the plugin to get the TCCL to properly initialize all of Hadoop components
+  permission java.lang.RuntimePermission "getClassLoader";
+
+  // used for DomainCombiner
+  permission java.security.SecurityPermission "createAccessControlContext";
+  
+  // set TCCL used for bootstrapping Hadoop Configuration and JAAS
+  permission java.lang.RuntimePermission "setContextClassLoader";
+
+  //
+  // Hadoop 1
+  //
+    
+  // UserGroupInformation (UGI)
+  permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
+
+  // UGI triggers JAAS
+  permission javax.security.auth.AuthPermission "getSubject";
+  
+  // JAAS libraries are not loaded with the proper context in Hadoop, hence why the permission is needed here
+  permission java.lang.RuntimePermission "loadLibrary.jaas_nt";
+  
+  // which triggers the use of the Kerberos library
+  permission java.lang.RuntimePermission "accessClassInPackage.sun.security.krb5";
+
+  // plus LoginContext
+  permission javax.security.auth.AuthPermission "modifyPrincipals";
+  
+  permission javax.security.auth.AuthPermission "modifyPublicCredentials";
+  
+  permission javax.security.auth.AuthPermission "modifyPrivateCredentials";
+
+  //
+  // Hadoop 2
+  //
+  
+  // UGI (Ugi Metrics)
+  permission java.lang.RuntimePermission "accessDeclaredMembers";
+
+  // Shell initialization - reading system props
+  permission java.util.PropertyPermission "*", "read,write";
+  
+  permission javax.security.auth.PrivateCredentialPermission "org.apache.hadoop.security.Credentials * \"*\"", "read";
+  
+  // HftpFileSystem (all present FS are loaded and initialized at startup ...)
+  permission java.lang.RuntimePermission "setFactory";
+};
\ No newline at end of file
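These grants apply to the plugin's own code; when callers further up the stack lack them, the plugin wraps the sensitive call in a doPrivileged block. A sketch (not from the patch) using the "getClassLoader" permission granted above:

import java.security.AccessController;
import java.security.PrivilegedAction;

public class PrivilegedSketch {
    // doPrivileged stops the permission check at this class's protection
    // domain, so only the plugin's policy grants need to cover the call
    public static ClassLoader contextClassLoader() {
        return AccessController.doPrivileged((PrivilegedAction<ClassLoader>) () ->
                Thread.currentThread().getContextClassLoader());
    }

    public static void main(String[] args) {
        System.out.println(contextClassLoader());
    }
}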
diff --git a/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc b/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc
new file mode 100644
index 00000000000..e9f85f3cdf7
--- /dev/null
+++ b/plugins/repository-hdfs/src/main/resources/hadoop-libs/README.asciidoc
@@ -0,0 +1 @@
+Folder containing the required Hadoop client libraries and dependencies.
\ No newline at end of file
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java
new file mode 100644
index 00000000000..8d8d6755cc3
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsRepositoryRestIT.java
@@ -0,0 +1,49 @@
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.rest.ESRestTestCase;
+import org.elasticsearch.test.rest.RestTestCandidate;
+import org.elasticsearch.test.rest.parser.RestTestParseException;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+public class HdfsRepositoryRestIT extends ESRestTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return pluginList(HdfsPlugin.class);
+    }
+
+    public HdfsRepositoryRestIT(@Name("yaml") RestTestCandidate testCandidate) {
+        super(testCandidate);
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() throws IOException, RestTestParseException {
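+        // partition 0 of 1: a single group containing every YAML test candidate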
+        return ESRestTestCase.createParameters(0, 1);
+    }
+}
diff --git a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java
similarity index 61%
rename from core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java
rename to plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java
index 895b3d844f6..8730a46a084 100644
--- a/core/src/main/java/org/elasticsearch/client/transport/ClientTransportModule.java
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTestPlugin.java
@@ -1,3 +1,5 @@
+package org.elasticsearch.plugin.hadoop.hdfs;
+
 /*
  * Licensed to Elasticsearch under one or more contributor
  * license agreements. See the NOTICE file distributed with
@@ -17,21 +19,16 @@
  * under the License.
  */
 
-package org.elasticsearch.client.transport;
+import java.net.URL;
+import java.util.Collections;
+import java.util.List;
 
-import org.elasticsearch.client.support.Headers;
-import org.elasticsearch.client.transport.support.TransportProxyClient;
-import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.plugin.hadoop.hdfs.HdfsPlugin;
 
-/**
- *
- */
-public class ClientTransportModule extends AbstractModule {
+public class HdfsTestPlugin extends HdfsPlugin {
 
     @Override
-    protected void configure() {
-        bind(Headers.class).asEagerSingleton();
-        bind(TransportProxyClient.class).asEagerSingleton();
-        bind(TransportClientNodesService.class).asEagerSingleton();
+    protected List<URL> getHadoopClassLoaderPath(String baseLib) {
+        return Collections.emptyList();
     }
 }
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java
new file mode 100644
index 00000000000..d1b23e92538
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/HdfsTests.java
@@ -0,0 +1,218 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+
+import java.util.Collection;
+
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.repositories.RepositoryException;
+import org.elasticsearch.repositories.RepositoryMissingException;
+import org.elasticsearch.repositories.hdfs.TestingFs;
+import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
+import org.elasticsearch.test.ESIntegTestCase.Scope;
+import org.elasticsearch.test.ESIntegTestCase.ThirdParty;
+import org.elasticsearch.test.store.MockFSDirectoryService;
+import org.junit.After;
+import org.junit.Before;
+
+/**
+ * You must specify {@code -Dtests.thirdparty=true}
+ */
+@ThirdParty
+@ClusterScope(scope = Scope.SUITE, numDataNodes = 1, transportClientRatio = 0.0)
+public class HdfsTests extends ESIntegTestCase {
+
+    @Override
+    public Settings indexSettings() {
+        return Settings.builder()
+                .put(super.indexSettings())
+                .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false)
+                .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false)
+                .build();
+    }
+
+    @Override
+    protected Settings nodeSettings(int ordinal) {
+        Settings.Builder settings = Settings.builder()
+                .put(super.nodeSettings(ordinal))
+                .put("path.home", createTempDir())
+                .put("path.repo", "")
+                .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false)
+                .put(MockFSDirectoryService.RANDOM_NO_DELETE_OPEN_FILE, false);
+        return settings.build();
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return pluginList(HdfsTestPlugin.class);
+    }
+
+    private String path;
+
+    @Before
+    public final void wipeBefore() throws Exception {
+        wipeRepositories();
+        path = "build/data/repo-" + randomInt();
+    }
+
+    @After
+    public final void wipeAfter() throws Exception {
+        wipeRepositories();
+    }
+
+    public void testSimpleWorkflow() {
+        Client client = client();
+        logger.info("-->  creating hdfs repository with path [{}]", path);
+
+        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+                .setType("hdfs")
+                .setSettings(Settings.settingsBuilder()
+                        //.put("uri", "hdfs://127.0.0.1:51227")
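+                        // fs.<scheme>.impl maps the es-hdfs:// scheme to the in-JVM TestingFs, so no real HDFS daemon is needed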
+                        .put("conf.fs.es-hdfs.impl", TestingFs.class.getName())
+                        .put("uri", "es-hdfs://./build/")
+                        .put("path", path)
+                        .put("conf", "additional-cfg.xml, conf-2.xml")
+                        .put("chunk_size", randomIntBetween(100, 1000) + "k")
+                        .put("compress", randomBoolean())
+                        ).get();
+        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+        createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+        ensureGreen();
+
+        logger.info("--> indexing some data");
+        for (int i = 0; i < 100; i++) {
+            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
+            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+            index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+        }
+        refresh();
+        assertThat(count(client, "test-idx-1"), equalTo(100L));
+        assertThat(count(client, "test-idx-2"), equalTo(100L));
+        assertThat(count(client, "test-idx-3"), equalTo(100L));
+
+        logger.info("--> snapshot");
+        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
+
+        logger.info("--> delete some data");
+        for (int i = 0; i < 50; i++) {
+            client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
+        }
+        for (int i = 50; i < 100; i++) {
+            client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
+        }
+        for (int i = 0; i < 100; i += 2) {
+            client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
+        }
+        refresh();
+        assertThat(count(client, "test-idx-1"), equalTo(50L));
+        assertThat(count(client, "test-idx-2"), equalTo(50L));
+        assertThat(count(client, "test-idx-3"), equalTo(50L));
+
+        logger.info("--> close indices");
+        client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
+
+        logger.info("--> restore all indices from the snapshot");
+        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+
+        ensureGreen();
+        assertThat(count(client, "test-idx-1"), equalTo(100L));
+        assertThat(count(client, "test-idx-2"), equalTo(100L));
+        assertThat(count(client, "test-idx-3"), equalTo(50L));
+
+        // Test restore after index deletion
+        logger.info("--> delete indices");
+        wipeIndices("test-idx-1", "test-idx-2");
+        logger.info("--> restore one index after deletion");
+        restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
+        ensureGreen();
+        assertThat(count(client, "test-idx-1"), equalTo(100L));
+        ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
+        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
+        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
+    }
+
+    private void wipeIndices(String... indices) {
+        cluster().wipeIndices(indices);
+    }
+
+    // expects a RepositoryException (RepositoryVerificationException) for the invalid path
+    public void testWrongPath() {
+        Client client = client();
+        logger.info("-->  creating hdfs repository with path [{}]", path);
+
+        try {
+            PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
+                    .setType("hdfs")
+                    .setSettings(Settings.settingsBuilder()
+                            // .put("uri", "hdfs://127.0.0.1:51227/")
+                            .put("conf.fs.es-hdfs.impl", TestingFs.class.getName())
+                        .put("uri", "es-hdfs:///")
+                        .put("path", path + "a@b$c#11:22")
+                        .put("chunk_size", randomIntBetween(100, 1000) + "k")
+                        .put("compress", randomBoolean()))
+                    .get();
+            assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+            createIndex("test-idx-1", "test-idx-2", "test-idx-3");
+            ensureGreen();
+            fail("Path name is invalid");
+        } catch (RepositoryException re) {
+            // expected
+        }
+    }
+
+    /**
+     * Deletes repositories, supports wildcard notation.
+     */
+    public static void wipeRepositories(String... repositories) {
+        // if nothing is provided, delete all
+        if (repositories.length == 0) {
+            repositories = new String[]{"*"};
+        }
+        for (String repository : repositories) {
+            try {
+                client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
+            } catch (RepositoryMissingException ex) {
+                // ignore
+            }
+        }
+    }
+
+    private long count(Client client, String index) {
+        return client.prepareSearch(index).setSize(0).get().getHits().totalHits();
+    }
+}
\ No newline at end of file
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java
new file mode 100644
index 00000000000..0d700615a1a
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/MiniHDFSCluster.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.elasticsearch.common.SuppressForbidden;
+
+import java.io.File;
+
+public class MiniHDFSCluster {
+
+    @SuppressForbidden(reason = "Hadoop is messy")
+    public static void main(String[] args) throws Exception {
+        FileUtil.fullyDelete(new File(System.getProperty("test.build.data", "build/test/data"), "dfs/"));
+        // MiniHadoopClusterManager.main(new String[] { "-nomr" });
+        Configuration cfg = new Configuration();
+        cfg.set(DataNode.DATA_DIR_PERMISSION_KEY, "666");
+        cfg.set("dfs.replication", "0");
+        MiniDFSCluster dfsCluster = new MiniDFSCluster(cfg, 1, true, null);
+        FileSystem fs = dfsCluster.getFileSystem();
+        System.out.println(fs.getClass());
+        System.out.println(fs.getUri());
+        System.out.println(dfsCluster.getHftpFileSystem().getClass());
+
+        // dfsCluster.shutdown();
+    }
+}
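A sketch (not part of the patch) of how a client would talk to the mini cluster started above, assuming the NameNode came up on localhost:9000; the port is illustrative, since MiniDFSCluster picks one at runtime:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HdfsClientSketch {
    public static void main(String[] args) throws Exception {
        // connect to the running NameNode and do a simple round trip
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), new Configuration());
        fs.mkdirs(new Path("/repo"));
        System.out.println(fs.exists(new Path("/repo")));
    }
}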
diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java
new file mode 100644
index 00000000000..37aecb04b9b
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/plugin/hadoop/hdfs/UtilsTests.java
@@ -0,0 +1,30 @@
+package org.elasticsearch.plugin.hadoop.hdfs;
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+import org.elasticsearch.test.ESTestCase;
+
+public class UtilsTests extends ESTestCase {
+
+    public void testDetectLibFolder() {
+        String location = HdfsPlugin.class.getProtectionDomain().getCodeSource().getLocation().toString();
+        assertEquals(location, Utils.detectLibFolder());
+    }
+}
diff --git a/plugins/repository-hdfs/src/test/resources/additional-cfg.xml b/plugins/repository-hdfs/src/test/resources/additional-cfg.xml
new file mode 100644
index 00000000000..b1b6611e924
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/additional-cfg.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>foo</name>
+    <value>foo</value>
+  </property>
+  <property>
+    <name>paradise</name>
+    <value>lost</value>
+  </property>
+</configuration>
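A sketch (not part of the patch) of how Hadoop surfaces these properties once the file is registered as a resource; the repository's "conf" setting shown earlier loads such files in a similar way:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ConfSketch {
    public static void main(String[] args) {
        Configuration cfg = new Configuration();
        // pick up the test resource from the working directory
        cfg.addResource(new Path("additional-cfg.xml"));
        System.out.println(cfg.get("foo"));      // foo
        System.out.println(cfg.get("paradise")); // lost
    }
}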
diff --git a/plugins/repository-hdfs/src/test/resources/conf-2.xml b/plugins/repository-hdfs/src/test/resources/conf-2.xml
new file mode 100644
index 00000000000..b1b6611e924
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/conf-2.xml
@@ -0,0 +1,12 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>foo</name>
+    <value>foo</value>
+  </property>
+  <property>
+    <name>paradise</name>
+    <value>lost</value>
+  </property>
+</configuration>
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml
new file mode 100644
index 00000000000..b7bc644a832
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/10_basic.yaml
@@ -0,0 +1,16 @@
+# Integration tests for HDFS Repository plugin
+#
+# Check plugin is installed
+#
+"HDFS Repository loaded":
+    - do:
+        cluster.state: {}
+
+    # Get master node id
+    - set: { master_node: master }
+
+    - do:
+        nodes.info: {}
+
+    - match:  { nodes.$master.plugins.0.name: repository-hdfs  }
+    - match:  { nodes.$master.plugins.0.jvm: true  }
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled
new file mode 100644
index 00000000000..f1f5f7a65e0
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/20_repository.disabled
@@ -0,0 +1,25 @@
+# Integration tests for HDFS Repository plugin
+#
+# Check plugin is installed
+#
+"HDFS Repository Config":
+    - do:
+        snapshot.create_repository:
+          repository: test_repo_hdfs_1
+          verify: false
+          body:
+            type: hdfs
+            settings:
+              # local HDFS implementation
+              conf.fs.es-hdfs.impl: "org.elasticsearch.repositories.hdfs.TestingFs"
+              uri: "es-hdfs://./build/"
+              path: "build/data/repo-hdfs"
+
+    # Get repository
+    - do:
+        snapshot.get_repository:
+          repository: test_repo_hdfs_1
+
+    - is_true: test_repo_hdfs_1
+    - is_true: test_repo_hdfs_1.settings.uri
+    - match: {test_repo_hdfs_1.settings.path : "build/data/repo-hdfs"}
diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle
index 32ad37530c2..90e4dd2d956 100644
--- a/plugins/repository-s3/build.gradle
+++ b/plugins/repository-s3/build.gradle
@@ -49,3 +49,12 @@ test {
   // this is needed for insecure plugins, remove if possible!
   systemProperty 'tests.artifact', project.name 
 }
+
+// classes are missing, e.g. org.apache.log.Logger
+thirdPartyAudit.missingClasses = true
+thirdPartyAudit.excludes = [
+    // uses internal java api: com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl
+    // uses internal java api: com.sun.org.apache.xml.internal.dtm.ref.DTMManagerDefault
+    // uses internal java api: com.sun.org.apache.xpath.internal.XPathContext
+    'com.amazonaws.util.XpathUtils',
+]
diff --git a/qa/evil-tests/build.gradle b/qa/evil-tests/build.gradle
index 96aa6fb635d..3782f368af4 100644
--- a/qa/evil-tests/build.gradle
+++ b/qa/evil-tests/build.gradle
@@ -34,3 +34,14 @@ dependencies {
 test {
   systemProperty 'tests.security.manager', 'false'
 }
+
+// classes are missing, e.g. com.ibm.icu.lang.UCharacter
+thirdPartyAudit.missingClasses = true
+thirdPartyAudit.excludes = [
+    // uses internal java api: sun.misc.Unsafe
+    'com.google.common.cache.Striped64',
+    'com.google.common.cache.Striped64$1',
+    'com.google.common.cache.Striped64$Cell',
+    'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator',
+    'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1',
+]
diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
index 66c3d7959f8..1ad972e10ef 100644
--- a/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
+++ b/qa/evil-tests/src/test/java/org/elasticsearch/tribe/TribeUnitTests.java
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -57,8 +58,20 @@ public class TribeUnitTests extends ESTestCase {
             .put("node.mode", NODE_MODE)
             .put("path.home", createTempDir()).build();
 
-        tribe1 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe1").put("name", "tribe1_node").build()).start();
-        tribe2 = new TribeClientNode(Settings.builder().put(baseSettings).put("cluster.name", "tribe2").put("name", "tribe2_node").build()).start();
+        tribe1 = new TribeClientNode(
+            Settings.builder()
+                .put(baseSettings)
+                .put("cluster.name", "tribe1")
+                .put("name", "tribe1_node")
+                .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong())
+                .build()).start();
+        tribe2 = new TribeClientNode(
+            Settings.builder()
+                .put(baseSettings)
+                .put("cluster.name", "tribe2")
+                .put("name", "tribe2_node")
+                .put(DiscoveryService.SETTING_DISCOVERY_SEED, random().nextLong())
+                .build()).start();
     }
 
     @AfterClass
@@ -73,6 +86,8 @@ public class TribeUnitTests extends ESTestCase {
         System.setProperty("es.cluster.name", "tribe_node_cluster");
         System.setProperty("es.tribe.t1.cluster.name", "tribe1");
         System.setProperty("es.tribe.t2.cluster.name", "tribe2");
+        System.setProperty("es.tribe.t1.discovery.id.seed", Long.toString(random().nextLong()));
+        System.setProperty("es.tribe.t2.discovery.id.seed", Long.toString(random().nextLong()));
 
         try {
             assertTribeNodeSuccesfullyCreated(Settings.EMPTY);
@@ -80,6 +95,8 @@ public class TribeUnitTests extends ESTestCase {
             System.clearProperty("es.cluster.name");
             System.clearProperty("es.tribe.t1.cluster.name");
             System.clearProperty("es.tribe.t2.cluster.name");
+            System.clearProperty("es.tribe.t1.discovery.id.seed");
+            System.clearProperty("es.tribe.t2.discovery.id.seed");
         }
     }
 
diff --git a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml
index 89f4922a6af..ad1b9be8c89 100644
--- a/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml
+++ b/qa/evil-tests/src/test/resources/org/elasticsearch/tribe/elasticsearch.yml
@@ -1,3 +1,5 @@
 cluster.name: tribe_node_cluster
 tribe.t1.cluster.name: tribe1
-tribe.t2.cluster.name: tribe2
\ No newline at end of file
+tribe.t2.cluster.name: tribe2
+tribe.t1.discovery.id.seed: 1
+tribe.t2.discovery.id.seed: 2
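The fixed seeds make node id generation reproducible across runs. A sketch of the underlying idea, assuming ids are drawn from a seeded java.util.Random as the discovery.id.seed settings suggest:

import java.util.Arrays;
import java.util.Random;

public class SeededIdSketch {
    public static void main(String[] args) {
        // two generators with the same seed produce identical id bytes
        byte[] first = new byte[16];
        byte[] second = new byte[16];
        new Random(1L).nextBytes(first);
        new Random(1L).nextBytes(second);
        System.out.println(Arrays.equals(first, second)); // true
    }
}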
diff --git a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
index 6e912cfab22..95df2d04458 100644
--- a/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
+++ b/qa/smoke-test-client/src/test/java/org/elasticsearch/smoketest/ESSmokeClientTestCase.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.smoketest;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.apache.lucene.util.SuppressForbidden;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.transport.TransportClient;
@@ -34,7 +35,10 @@ import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 
+import java.io.IOException;
 import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.URL;
 import java.net.UnknownHostException;
 import java.nio.file.Path;
 import java.util.Locale;
@@ -103,20 +107,14 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
         return client;
     }
 
-    private static Client startClient() throws UnknownHostException {
+    private static Client startClient() throws IOException {
         String[] stringAddresses = clusterAddresses.split(",");
         TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
         int i = 0;
         for (String stringAddress : stringAddresses) {
-            String[] split = stringAddress.split(":");
-            if (split.length < 2) {
-                throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
-            }
-            try {
-                transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1]));
-            } catch (NumberFormatException e) {
-                throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
-            }
+            URL url = new URL("http://" + stringAddress);
+            InetAddress inetAddress = InetAddress.getByName(url.getHost());
+            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
         }
         return startClient(createTempDir(), transportAddresses);
     }
@@ -125,7 +123,7 @@ public abstract class ESSmokeClientTestCase extends LuceneTestCase {
         if (client == null) {
             try {
                 client = startClient();
-            } catch (UnknownHostException e) {
+            } catch (IOException e) {
                 logger.error("can not start the client", e);
             }
             assertThat(client, notNullValue());
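The rewritten startClient() above leans on java.net.URL to split "host:port" pairs; a sketch of the trick in isolation, with an illustrative address:

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URL;

public class AddressParseSketch {
    public static void main(String[] args) throws Exception {
        // prefixing a scheme lets URL separate host from port, including
        // IPv6 literals in brackets, without hand-rolled string splitting
        URL url = new URL("http://" + "127.0.0.1:9300");
        InetSocketAddress address =
                new InetSocketAddress(InetAddress.getByName(url.getHost()), url.getPort());
        System.out.println(address);
    }
}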
diff --git a/qa/smoke-test-plugins/build.gradle b/qa/smoke-test-plugins/build.gradle
index 70611aed371..9d8e3950a83 100644
--- a/qa/smoke-test-plugins/build.gradle
+++ b/qa/smoke-test-plugins/build.gradle
@@ -22,15 +22,16 @@ import org.elasticsearch.gradle.MavenFilteringHack
 apply plugin: 'elasticsearch.rest-test'
 
 ext.pluginsCount = 0
-project.rootProject.subprojects.findAll { it.path.startsWith(':projects:') }.each { subproj ->
+project.rootProject.subprojects.findAll { it.path.startsWith(':plugins:') }.each { subproj ->
   integTest {
     cluster {
       // need to get a non-decorated project object, so must re-lookup the project by path
       plugin subproj.name, project(subproj.path)
     }
   }
-  pluginCount += 1
+  pluginsCount += 1
 }
+assert pluginsCount > 0
 
 ext.expansions = [
   'expected.plugins.count': pluginsCount
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml
new file mode 100644
index 00000000000..4162296532d
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/11_reset.yaml
@@ -0,0 +1,31 @@
+---
+"Test reset cluster settings":
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            cluster.routing.allocation.disk.threshold_enabled: false
+        flat_settings: true
+
+  - match: {persistent: {cluster.routing.allocation.disk.threshold_enabled: "false"}}
+
+  - do:
+      cluster.get_settings:
+        flat_settings: true
+
+  - match: {persistent: {cluster.routing.allocation.disk.threshold_enabled: "false"}}
+
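+  # setting a persistent value to null removes it; the follow-up GET should see an empty map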
+  - do:
+      cluster.put_settings:
+        body:
+          persistent:
+            cluster.routing.allocation.disk.threshold_enabled: null
+        flat_settings: true
+
+  - match: {persistent: {}}
+
+  - do:
+      cluster.get_settings:
+        flat_settings: true
+
+  - match: {persistent: {}}
diff --git a/settings.gradle b/settings.gradle
index e928e53b690..3526c0429ef 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -8,7 +8,7 @@ List projects = [
   'distribution:tar',
   'distribution:deb',
   'distribution:rpm',
-  'test-framework',
+  'test:framework',
   'modules:lang-expression',
   'modules:lang-groovy',
   'modules:lang-mustache',
@@ -29,6 +29,7 @@ List projects = [
   'plugins:mapper-murmur3',
   'plugins:mapper-size',
   'plugins:repository-azure',
+  'plugins:repository-hdfs',
   'plugins:repository-s3',
   'plugins:jvm-example',
   'plugins:site-example',
diff --git a/test/build.gradle b/test/build.gradle
new file mode 100644
index 00000000000..037bb8d508e
--- /dev/null
+++ b/test/build.gradle
@@ -0,0 +1,23 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+subprojects {
+  group = 'org.elasticsearch.test'
+  apply plugin: 'com.bmuschko.nexus'
+}
diff --git a/test-framework/build.gradle b/test/framework/build.gradle
similarity index 93%
rename from test-framework/build.gradle
rename to test/framework/build.gradle
index a423f56c922..a2c568f1d7f 100644
--- a/test-framework/build.gradle
+++ b/test/framework/build.gradle
@@ -19,7 +19,6 @@
 import org.elasticsearch.gradle.precommit.PrecommitTasks
 
 apply plugin: 'elasticsearch.build'
-apply plugin: 'com.bmuschko.nexus'
 
 dependencies {
   compile "org.elasticsearch:elasticsearch:${version}"
@@ -47,3 +46,5 @@ forbiddenApisMain {
 // TODO: should we have licenses for our test deps?
 dependencyLicenses.enabled = false
 
+// we intentionally exclude the ant tasks because people were depending on them from their tests!
+thirdPartyAudit.missingClasses = true
diff --git a/test-framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java b/test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
rename to test/framework/src/main/java/org/elasticsearch/bootstrap/BootstrapForTesting.java
diff --git a/test-framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java b/test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
rename to test/framework/src/main/java/org/elasticsearch/cache/recycler/MockPageCacheRecycler.java
diff --git a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
similarity index 92%
rename from test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
rename to test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
index 6ac2101fe52..3e9b0c09cb2 100644
--- a/test-framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
+++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java
@@ -29,10 +29,11 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.DummyTransportAddress;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.monitor.fs.FsInfo;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -77,11 +78,11 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
     }
 
     @Inject
-    public MockInternalClusterInfoService(Settings settings, NodeSettingsService nodeSettingsService,
+    public MockInternalClusterInfoService(Settings settings, ClusterSettings clusterSettings,
                                           TransportNodesStatsAction transportNodesStatsAction,
                                           TransportIndicesStatsAction transportIndicesStatsAction,
                                           ClusterService clusterService, ThreadPool threadPool) {
-        super(settings, nodeSettingsService, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool);
+        super(settings, clusterSettings, transportNodesStatsAction, transportIndicesStatsAction, clusterService, threadPool);
         this.clusterName = ClusterName.clusterNameFromSettings(settings);
         stats[0] = makeStats("node_t1", new DiskUsage("node_t1", "n1", "/dev/null", 100, 100));
         stats[1] = makeStats("node_t2", new DiskUsage("node_t2", "n2", "/dev/null", 100, 100));
@@ -133,4 +134,9 @@ public class MockInternalClusterInfoService extends InternalClusterInfoService {
             return "/dev/null";
         }
     }
+
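+    // delegating override; presumably widens visibility so tests can tune the polling interval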
+    @Override
+    public void setUpdateFrequency(TimeValue updateFrequency) {
+        super.setUpdateFrequency(updateFrequency);
+    }
 }
diff --git a/test-framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
rename to test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java
diff --git a/test-framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/common/cli/CliToolTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
rename to test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java b/test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
rename to test/framework/src/main/java/org/elasticsearch/common/io/PathUtilsForTesting.java
diff --git a/test-framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java
rename to test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java
diff --git a/test-framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java b/test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
rename to test/framework/src/main/java/org/elasticsearch/index/MapperTestUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java b/test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java
rename to test/framework/src/main/java/org/elasticsearch/index/MockEngineFactoryPlugin.java
diff --git a/test-framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/node/MockNode.java
rename to test/framework/src/main/java/org/elasticsearch/node/MockNode.java
diff --git a/test-framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java b/test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java
rename to test/framework/src/main/java/org/elasticsearch/node/NodeMocksPlugin.java
diff --git a/test-framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java b/test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java
rename to test/framework/src/main/java/org/elasticsearch/percolator/PercolatorTestUtil.java
diff --git a/test-framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java b/test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
rename to test/framework/src/main/java/org/elasticsearch/plugins/PluginTestUtil.java
diff --git a/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
similarity index 97%
rename from test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
rename to test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
index bfd40900456..aec90aa50d4 100644
--- a/test-framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
+++ b/test/framework/src/main/java/org/elasticsearch/script/MockScriptEngine.java
@@ -72,7 +72,7 @@ public class MockScriptEngine implements ScriptEngineService {
     }
 
     @Override
-    public Object compile(String script) {
+    public Object compile(String script, Map<String, String> params) {
         return script;
     }
 
diff --git a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
similarity index 82%
rename from test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java
rename to test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
index 9a7a3efa3dc..98b5181636d 100644
--- a/test-framework/src/main/java/org/elasticsearch/search/MockSearchService.java
+++ b/test/framework/src/main/java/org/elasticsearch/search/MockSearchService.java
@@ -22,12 +22,12 @@ package org.elasticsearch.search;
 import org.elasticsearch.cache.recycler.PageCacheRecycler;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.cache.request.IndicesRequestCache;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.dfs.DfsPhase;
@@ -67,13 +67,13 @@ public class MockSearchService extends SearchService {
     }
 
     @Inject
-    public MockSearchService(Settings settings, NodeSettingsService nodeSettingsService, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer,
-            ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
-            DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) {
-        super(settings, nodeSettingsService, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase,
+    public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService, IndicesWarmer indicesWarmer,
+                             ThreadPool threadPool, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays,
+                             DfsPhase dfsPhase, QueryPhase queryPhase, FetchPhase fetchPhase, IndicesRequestCache indicesQueryCache) {
+        super(settings, clusterSettings, clusterService, indicesService, indicesWarmer, threadPool, scriptService, pageCacheRecycler, bigArrays, dfsPhase,
                 queryPhase, fetchPhase, indicesQueryCache);
     }
- 
+
     @Override
     protected void putContext(SearchContext context) {
         super.putContext(context);
diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractTermsTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptNoParams.java
diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/NativeSignificanceScoreScriptWithParams.java
diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/script/TestScript.java
diff --git a/test-framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractNumericTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java b/test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
rename to test/framework/src/main/java/org/elasticsearch/test/BackgroundIndexer.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java
rename to test/framework/src/main/java/org/elasticsearch/test/CompositeTestCluster.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
rename to test/framework/src/main/java/org/elasticsearch/test/CorruptionUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/DummyShardLock.java b/test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/DummyShardLock.java
rename to test/framework/src/main/java/org/elasticsearch/test/DummyShardLock.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
similarity index 89%
rename from test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
index c4f4b196739..c02682bb641 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESAllocationTestCase.java
@@ -23,6 +23,7 @@ import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.EmptyClusterInfoService;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
@@ -36,6 +37,8 @@ import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.DummyTransportAddress;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -43,7 +46,6 @@ import org.elasticsearch.gateway.AsyncShardFetch;
 import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.gateway.ReplicaShardAllocator;
 import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.test.gateway.NoopGatewayAllocator;
 
 import java.lang.reflect.Constructor;
@@ -67,37 +69,37 @@ public abstract class ESAllocationTestCase extends ESTestCase {
     }
 
     public static MockAllocationService createAllocationService(Settings settings, Random random) {
-        return createAllocationService(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), random);
+        return createAllocationService(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), random);
     }
 
-    public static MockAllocationService createAllocationService(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+    public static MockAllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings, Random random) {
         return new MockAllocationService(settings,
-                randomAllocationDeciders(settings, nodeSettingsService, random),
+                randomAllocationDeciders(settings, clusterSettings, random),
                 new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), EmptyClusterInfoService.INSTANCE);
     }
 
     public static MockAllocationService createAllocationService(Settings settings, ClusterInfoService clusterInfoService) {
         return new MockAllocationService(settings,
-                randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()),
+                randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()),
                 new ShardsAllocators(settings, NoopGatewayAllocator.INSTANCE), clusterInfoService);
     }
 
     public static MockAllocationService createAllocationService(Settings settings, GatewayAllocator allocator) {
         return new MockAllocationService(settings,
-                randomAllocationDeciders(settings, new NodeSettingsService(Settings.Builder.EMPTY_SETTINGS), getRandom()),
+                randomAllocationDeciders(settings, new ClusterSettings(Settings.Builder.EMPTY_SETTINGS, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), getRandom()),
                 new ShardsAllocators(settings, allocator), EmptyClusterInfoService.INSTANCE);
     }
 
 
 
-    public static AllocationDeciders randomAllocationDeciders(Settings settings, NodeSettingsService nodeSettingsService, Random random) {
+    public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettings clusterSettings, Random random) {
         final List<Class<? extends AllocationDecider>> defaultAllocationDeciders = ClusterModule.DEFAULT_ALLOCATION_DECIDERS;
         final List<AllocationDecider> list = new ArrayList<>();
         for (Class<? extends AllocationDecider> deciderClass : ClusterModule.DEFAULT_ALLOCATION_DECIDERS) {
             try {
                 try {
-                    Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, NodeSettingsService.class);
-                    list.add(constructor.newInstance(settings, nodeSettingsService));
+                    Constructor<? extends AllocationDecider> constructor = deciderClass.getConstructor(Settings.class, ClusterSettings.class);
+                    list.add(constructor.newInstance(settings, clusterSettings));
                 } catch (NoSuchMethodException e) {
                     Constructor<? extends AllocationDecider> constructor = null;
                     constructor = deciderClass.getConstructor(Settings.class);
@@ -111,7 +113,7 @@ public abstract class ESAllocationTestCase extends ESTestCase {
         for (AllocationDecider d : list) {
             assertThat(defaultAllocationDeciders.contains(d.getClass()), is(true));
         }
-        Collections.shuffle(list, random);
+        Randomness.shuffle(list);
         return new AllocationDeciders(settings, list.toArray(new AllocationDecider[0]));
 
     }
@@ -229,7 +231,8 @@ public abstract class ESAllocationTestCase extends ESTestCase {
             boolean changed = false;
             while (unassignedIterator.hasNext()) {
                 ShardRouting shard = unassignedIterator.next();
-                if (shard.primary() || shard.allocatedPostIndexCreate() == false) {
+                IndexMetaData indexMetaData = allocation.metaData().index(shard.getIndex());
+                if (shard.primary() || shard.allocatedPostIndexCreate(indexMetaData) == false) {
                     continue;
                 }
                 changed |= replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard);
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
similarity index 97%
rename from test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
index 3e5c903a1ba..49644196da4 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESBackcompatTestCase.java
@@ -26,14 +26,12 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.junit.annotations.TestLogging;
 import org.elasticsearch.test.junit.listeners.LoggingListener;
-import org.elasticsearch.transport.Transport;
-import org.elasticsearch.transport.TransportModule;
 
 import java.io.IOException;
 import java.lang.annotation.ElementType;
@@ -46,7 +44,6 @@ import java.nio.file.Path;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Map;
-import java.util.Random;
 
 import static org.hamcrest.Matchers.is;
 
@@ -181,6 +178,11 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
                 return externalNodeSettings(nodeOrdinal);
             }
 
+            @Override
+            public Collection<Class<? extends Plugin>> nodePlugins() {
+                return Collections.emptyList();
+            }
+
             @Override
             public Settings transportClientSettings() {
                 return transportClientSettings();
@@ -238,7 +240,7 @@ public abstract class ESBackcompatTestCase extends ESIntegTestCase {
 
     protected Settings commonNodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder().put(requiredSettings());
-        builder.put(TransportModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport  / disco as external
+        builder.put(NetworkModule.TRANSPORT_TYPE_KEY, "netty"); // run same transport  / disco as external
         builder.put("node.mode", "network");
         return builder.build();
     }
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
similarity index 97%
rename from test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index 05dab1c35dc..03089708b66 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -20,11 +20,9 @@ package org.elasticsearch.test;
 
 import com.carrotsearch.randomizedtesting.RandomizedContext;
 import com.carrotsearch.randomizedtesting.RandomizedTest;
-import com.carrotsearch.randomizedtesting.Randomness;
 import com.carrotsearch.randomizedtesting.annotations.TestGroup;
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
-
 import org.apache.http.impl.client.HttpClients;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
@@ -35,7 +33,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
@@ -43,8 +40,9 @@ import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsResponse;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
-import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
+import org.elasticsearch.action.admin.indices.flush.SyncedFlushResponse;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
 import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
@@ -62,6 +60,7 @@ import org.elasticsearch.client.Client;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -96,6 +95,7 @@ import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
 import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -112,13 +112,18 @@ import org.elasticsearch.indices.cache.request.IndicesRequestCache;
 import org.elasticsearch.indices.flush.SyncedFlushService;
 import org.elasticsearch.indices.store.IndicesStore;
 import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeMocksPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.MockSearchService;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.test.client.RandomizingClient;
 import org.elasticsearch.test.disruption.ServiceDisruptionScheme;
 import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
+import org.elasticsearch.test.store.MockFSIndexStore;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
 import org.hamcrest.Matchers;
 import org.joda.time.DateTimeZone;
 import org.junit.After;
@@ -129,6 +134,7 @@ import org.junit.BeforeClass;
 import java.io.IOException;
 import java.io.InputStream;
 import java.lang.annotation.Annotation;
+import java.lang.annotation.Documented;
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Inherited;
 import java.lang.annotation.Retention;
@@ -136,7 +142,7 @@ import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
+import java.net.URL;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -159,6 +165,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.BooleanSupplier;
 
+import static org.elasticsearch.client.Requests.syncedFlushRequest;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
 import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.common.settings.Settings.settingsBuilder;
@@ -533,7 +540,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
     }
 
     private TestCluster buildWithPrivateContext(final Scope scope, final long seed) throws Exception {
-        return RandomizedContext.current().runWithPrivateRandomness(new Randomness(seed), new Callable<TestCluster>() {
+        return RandomizedContext.current().runWithPrivateRandomness(new com.carrotsearch.randomizedtesting.Randomness(seed), new Callable<TestCluster>() {
             @Override
             public TestCluster call() throws Exception {
                 return buildTestCluster(scope, seed);
@@ -1039,7 +1046,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
      */
     public void setMinimumMasterNodes(int n) {
         assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings(
-                settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n))
+                settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), n))
                 .get().isAcknowledged());
     }
 
@@ -1388,7 +1395,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
             }
         }
         final String[] indices = indicesSet.toArray(new String[indicesSet.size()]);
-        Collections.shuffle(builders, random);
+        Collections.shuffle(builders, random());
         final CopyOnWriteArrayList<Tuple<IndexRequestBuilder, Throwable>> errors = new CopyOnWriteArrayList<>();
         List<CountDownLatch> inFlightAsyncOperations = new ArrayList<>();
         // If you are indexing just a few documents then frequently do it one at a time.  If many then frequently in bulk.
@@ -1474,7 +1481,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
 
     /** Sets or unsets the cluster read_only mode **/
     public static void setClusterReadOnly(boolean value) {
-        Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build();
+        Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), value).build();
         assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get());
     }
 
@@ -1496,13 +1503,13 @@ public abstract class ESIntegTestCase extends ESTestCase {
                 if (randomBoolean()) {
                     client().admin().indices().prepareFlush(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).execute(
                             new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
-                } else if (isInternalCluster()) {
-                    internalCluster().getInstance(SyncedFlushService.class).attemptSyncedFlush(indices, IndicesOptions.lenientExpandOpen(),
-                            new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
+                } else {
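+                    // go through the client-level synced flush API, so the call no longer requires an internal cluster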
+                    client().admin().indices().syncedFlush(syncedFlushRequest(indices).indicesOptions(IndicesOptions.lenientExpandOpen()),
+                        new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
                 }
             } else if (rarely()) {
                 client().admin().indices().prepareForceMerge(indices).setIndicesOptions(IndicesOptions.lenientExpandOpen()).setMaxNumSegments(between(1, 10)).setFlush(maybeFlush && randomBoolean()).execute(
-                        new LatchedActionListener(newLatch(inFlightAsyncOperations)));
+                        new LatchedActionListener<>(newLatch(inFlightAsyncOperations)));
             }
         }
         while (inFlightAsyncOperations.size() > MAX_IN_FLIGHT_ASYNC_INDEXES) {
@@ -1687,8 +1694,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
         Settings.Builder builder = settingsBuilder()
                 // Default the watermarks to absurdly low to prevent the tests
                 // from failing on nodes without enough disk space
-                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b")
-                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")
+                .put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b")
                 .put("script.indexed", "on")
                 .put("script.inline", "on")
                         // wait short time for other active shards before actually deleting, default 30s not needed in tests
@@ -1727,20 +1734,14 @@ public abstract class ESIntegTestCase extends ESTestCase {
         return Settings.EMPTY;
     }
 
-    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws UnknownHostException {
+    private ExternalTestCluster buildExternalCluster(String clusterAddresses) throws IOException {
         String[] stringAddresses = clusterAddresses.split(",");
         TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length];
         int i = 0;
         for (String stringAddress : stringAddresses) {
-            String[] split = stringAddress.split(":");
-            if (split.length < 2) {
-                throw new IllegalArgumentException("address [" + clusterAddresses + "] not valid");
-            }
-            try {
-                transportAddresses[i++] = new InetSocketTransportAddress(InetAddress.getByName(split[0]), Integer.valueOf(split[1]));
-            } catch (NumberFormatException e) {
-                throw new IllegalArgumentException("port is not valid, expected number but was [" + split[1] + "]");
-            }
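+            // prefix the address with a dummy scheme so java.net.URL does the host:port splitting for us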
+            URL url = new URL("http://" + stringAddress);
+            InetAddress inetAddress = InetAddress.getByName(url.getHost());
+            transportAddresses[i++] = new InetSocketTransportAddress(new InetSocketAddress(inetAddress, url.getPort()));
         }
         return new ExternalTestCluster(createTempDir(), externalClusterClientSettings(), transportClientPlugins(), transportAddresses);
     }
@@ -1812,14 +1813,21 @@ public abstract class ESIntegTestCase extends ESTestCase {
             nodeMode = "local";
         }
 
-        boolean enableMockModules = enableMockModules();
+        Collection<Class<? extends Plugin>> mockPlugins = getMockPlugins();
+
         return new InternalTestCluster(nodeMode, seed, createTempDir(), minNumDataNodes, maxNumDataNodes,
                 InternalTestCluster.clusterName(scope.name(), seed) + "-cluster", nodeConfigurationSource, getNumClientNodes(),
-                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, enableMockModules);
+                InternalTestCluster.DEFAULT_ENABLE_HTTP_PIPELINING, nodePrefix, mockPlugins);
     }
 
-    protected boolean enableMockModules() {
-        return RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true);
+    /** Return the mock plugins the cluster should use. These may be randomly omitted based on the cluster seed. */
+    protected Collection<Class<? extends Plugin>> getMockPlugins() {
+        return pluginList(MockTransportService.TestPlugin.class,
+                          MockFSIndexStore.TestPlugin.class,
+                          NodeMocksPlugin.class,
+                          MockEngineFactoryPlugin.class,
+                          MockSearchService.TestPlugin.class,
+                          AssertingLocalTransport.TestPlugin.class);
     }
 
     /**
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
similarity index 99%
rename from test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index c59c3ba4d4e..e1443110c0d 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -566,7 +566,7 @@ public abstract class ESTestCase extends LuceneTestCase {
             throw new IllegalArgumentException("Can\'t pick " + size + " random objects from a list of " + values.length + " objects");
         }
         List<T> list = arrayAsArrayList(values);
-        Collections.shuffle(list);
+        Collections.shuffle(list, random());
         return list.subList(0, size);
     }
 
@@ -615,7 +615,7 @@ public abstract class ESTestCase extends LuceneTestCase {
         sb.append("]");
         assertThat(count + " files exist that should have been cleaned:\n" + sb.toString(), count, equalTo(0));
     }
-    
+
     /** Returns the suite failure marker: internal use only! */
     public static TestRuleMarkFailure getSuiteFailureMarker() {
         return suiteFailureMarker;
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/ESTokenStreamTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
similarity index 98%
rename from test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java
rename to test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
index 6ab39a5b139..05f194fc26a 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/ExternalNode.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalNode.java
@@ -28,11 +28,11 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
-import org.elasticsearch.transport.TransportModule;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -111,9 +111,9 @@ final class ExternalNode implements Closeable {
                 case "path.home":
                 case "node.mode":
                 case "node.local":
-                case TransportModule.TRANSPORT_TYPE_KEY:
+                case NetworkModule.TRANSPORT_TYPE_KEY:
                 case DiscoveryModule.DISCOVERY_TYPE_KEY:
-                case TransportModule.TRANSPORT_SERVICE_TYPE_KEY:
+                case NetworkModule.TRANSPORT_SERVICE_TYPE_KEY:
                 case InternalSettingsPreparer.IGNORE_SYSTEM_PROPERTIES_SETTING:
                     continue;
                 default:
diff --git a/test-framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
rename to test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java b/test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java
rename to test/framework/src/main/java/org/elasticsearch/test/FieldMaskingReader.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java b/test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
rename to test/framework/src/main/java/org/elasticsearch/test/IndexSettingsModule.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
similarity index 97%
rename from test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
rename to test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
index 7ae3226b66a..10d4482a24c 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java
@@ -199,7 +199,7 @@ public final class InternalTestCluster extends TestCluster {
 
     private final ExecutorService executor;
 
-    private final boolean enableMockModules;
+    private final Collection<Class<? extends Plugin>> mockPlugins;
 
     /**
      * All nodes started by the cluster will have their name set to nodePrefix followed by a positive number
@@ -212,7 +212,7 @@ public final class InternalTestCluster extends TestCluster {
 
     public InternalTestCluster(String nodeMode, long clusterSeed, Path baseDir,
                                int minNumDataNodes, int maxNumDataNodes, String clusterName, NodeConfigurationSource nodeConfigurationSource, int numClientNodes,
-                               boolean enableHttpPipelining, String nodePrefix, boolean enableMockModules) {
+                               boolean enableHttpPipelining, String nodePrefix, Collection<Class<? extends Plugin>> mockPlugins) {
         super(clusterSeed);
         if ("network".equals(nodeMode) == false && "local".equals(nodeMode) == false) {
             throw new IllegalArgumentException("Unknown nodeMode: " + nodeMode);
@@ -248,7 +248,7 @@ public final class InternalTestCluster extends TestCluster {
         this.nodePrefix = nodePrefix;
 
         assert nodePrefix != null;
-        this.enableMockModules = enableMockModules;
+        this.mockPlugins = mockPlugins;
 
         /*
          *  TODO
@@ -292,19 +292,19 @@ public final class InternalTestCluster extends TestCluster {
         }
         // Default the watermarks to absurdly low to prevent the tests
         // from failing on nodes without enough disk space
-        builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK, "1b");
-        builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK, "1b");
+        builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b");
+        builder.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING.getKey(), "1b");
         if (TEST_NIGHTLY) {
-            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
-            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 10, 15));
-            builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 5, 10));
+            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 10, 15));
+            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 10, 15));
+            builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 5, 10));
         } else if (random.nextInt(100) <= 90) {
-            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
-            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS, RandomInts.randomIntBetween(random, 3, 6));
-            builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES, RandomInts.randomIntBetween(random, 2, 5));
+            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 3, 6));
+            builder.put(RecoverySettings.INDICES_RECOVERY_CONCURRENT_SMALL_FILE_STREAMS_SETTING.getKey(), RandomInts.randomIntBetween(random, 3, 6));
+            builder.put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), RandomInts.randomIntBetween(random, 2, 5));
         }
         // always reduce this - it can make tests really slow
-        builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC, TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
+        builder.put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.getKey(), TimeValue.timeValueMillis(RandomInts.randomIntBetween(random, 20, 50)));
         defaultSettings = builder.build();
         executor = EsExecutors.newCached("test runner", 0, TimeUnit.SECONDS, EsExecutors.daemonThreadFactory("test_" + clusterName));
     }
@@ -359,16 +359,10 @@ public final class InternalTestCluster extends TestCluster {
 
     private Collection<Class<? extends Plugin>> getPlugins(long seed) {
         Set<Class<? extends Plugin>> plugins = new HashSet<>(nodeConfigurationSource.nodePlugins());
-        Random random = new Random(seed);
-        if (enableMockModules && usually(random)) {
-            plugins.add(MockTransportService.TestPlugin.class);
-            plugins.add(MockFSIndexStore.TestPlugin.class);
-            plugins.add(NodeMocksPlugin.class);
-            plugins.add(MockEngineFactoryPlugin.class);
-            plugins.add(MockSearchService.TestPlugin.class);
-            if (isLocalTransportConfigured()) {
-                plugins.add(AssertingLocalTransport.TestPlugin.class);
-            }
+        plugins.addAll(mockPlugins);
+        if (isLocalTransportConfigured() == false) {
+            // this is crazy we must do this here...we should really just always be using local transport...
+            plugins.remove(AssertingLocalTransport.TestPlugin.class);
         }
         return plugins;
     }
@@ -378,7 +372,7 @@ public final class InternalTestCluster extends TestCluster {
         Builder builder = Settings.settingsBuilder()
                 .put(SETTING_CLUSTER_NODE_SEED, seed);
         if (isLocalTransportConfigured() == false) {
-            builder.put(Transport.TransportSettings.TRANSPORT_TCP_COMPRESS, rarely(random));
+            builder.put(Transport.TRANSPORT_TCP_COMPRESS.getKey(), rarely(random));
         }
         if (random.nextBoolean()) {
             builder.put("cache.recycler.page.type", RandomPicks.randomFrom(random, PageCacheRecycler.Type.values()));
@@ -412,12 +406,12 @@ public final class InternalTestCluster extends TestCluster {
         }
 
         if (random.nextBoolean()) {
-            builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT, new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS));
+            builder.put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), new TimeValue(RandomInts.randomIntBetween(random, 10, 30), TimeUnit.SECONDS));
         }
 
         if (random.nextInt(10) == 0) {
-            builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
-            builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING, "noop");
+            builder.put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
+            builder.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop");
         }
 
         if (random.nextBoolean()) {
@@ -430,20 +424,20 @@ public final class InternalTestCluster extends TestCluster {
 
         if (random.nextBoolean()) {
             if (random.nextInt(10) == 0) { // do something crazy slow here
-                builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+                builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
             } else {
-                builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+                builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
             }
         }
         if (random.nextBoolean()) {
-            builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
+            builder.put(IndexStoreConfig.INDICES_STORE_THROTTLE_TYPE_SETTING.getKey(), RandomPicks.randomFrom(random, StoreRateLimiting.Type.values()));
         }
 
         if (random.nextBoolean()) {
             if (random.nextInt(10) == 0) { // do something crazy slow here
-                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
+                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 1, 10), ByteSizeUnit.MB));
             } else {
-                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC, new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
+                builder.put(RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey(), new ByteSizeValue(RandomInts.randomIntBetween(random, 10, 200), ByteSizeUnit.MB));
             }
         }
 
diff --git a/test-framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java b/test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
rename to test/framework/src/main/java/org/elasticsearch/test/MockIndexEventListener.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
similarity index 67%
rename from test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
rename to test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
index e04e840e525..5dfb845c192 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/NodeConfigurationSource.java
@@ -19,10 +19,18 @@
 package org.elasticsearch.test;
 
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.MockEngineFactoryPlugin;
+import org.elasticsearch.node.NodeMocksPlugin;
 import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.search.MockSearchService;
+import org.elasticsearch.test.store.MockFSIndexStore;
+import org.elasticsearch.test.transport.AssertingLocalTransport;
+import org.elasticsearch.test.transport.MockTransportService;
 
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 
 public abstract class NodeConfigurationSource {
 
@@ -43,6 +51,18 @@ public abstract class NodeConfigurationSource {
      */
     public abstract Settings nodeSettings(int nodeOrdinal);
 
+    /** Plugins that will be randomly added to the node */
+    public Collection<Class<? extends Plugin>> mockPlugins() {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>();
+        plugins.add(MockTransportService.TestPlugin.class);
+        plugins.add(MockFSIndexStore.TestPlugin.class);
+        plugins.add(NodeMocksPlugin.class);
+        plugins.add(MockEngineFactoryPlugin.class);
+        plugins.add(MockSearchService.TestPlugin.class);
+        plugins.add(AssertingLocalTransport.TestPlugin.class);
+        return plugins;
+    }
+
     /** Returns plugins that should be loaded on the node */
     public Collection<Class<? extends Plugin>> nodePlugins() {
         return Collections.emptyList();
diff --git a/test-framework/src/main/java/org/elasticsearch/test/StreamsUtils.java b/test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/StreamsUtils.java
rename to test/framework/src/main/java/org/elasticsearch/test/StreamsUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/TestCluster.java
rename to test/framework/src/main/java/org/elasticsearch/test/TestCluster.java
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java
new file mode 100644
index 00000000000..92d5b95cfac
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetaData.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.test;
+
+import org.elasticsearch.ElasticsearchParseException;
+import org.elasticsearch.cluster.AbstractDiffable;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+
+public abstract class TestCustomMetaData extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom {
+    private final String data;
+
+    protected TestCustomMetaData(String data) {
+        this.data = data;
+    }
+
+    public String getData() {
+        return data;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        TestCustomMetaData that = (TestCustomMetaData) o;
+
+        if (!data.equals(that.data)) return false;
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return data.hashCode();
+    }
+
+    protected abstract TestCustomMetaData newTestCustomMetaData(String data);
+
+    @Override
+    public MetaData.Custom readFrom(StreamInput in) throws IOException {
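+        // the only serialized state is the data string; the subclass factory rebuilds the instance from it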
+        return newTestCustomMetaData(in.readString());
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(getData());
+    }
+
+    @Override
+    public MetaData.Custom fromXContent(XContentParser parser) throws IOException {
+        XContentParser.Token token;
+        String data = null;
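+        // walk the object token by token; only a single string field named "data" is accepted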
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                String currentFieldName = parser.currentName();
+                if ("data".equals(currentFieldName)) {
+                    if (parser.nextToken() != XContentParser.Token.VALUE_STRING) {
+                        throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type");
+                    }
+                    data = parser.text();
+                } else {
+                    throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [{}]", currentFieldName);
+                }
+            } else {
+                throw new ElasticsearchParseException("failed to parse snapshottable metadata");
+            }
+        }
+        if (data == null) {
+            throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found");
+        }
+        return newTestCustomMetaData(data);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.field("data", getData());
+        return builder;
+    }
+}
diff --git a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
similarity index 98%
rename from test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
rename to test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
index 468b1877250..51fb0c905f4 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java
@@ -35,7 +35,6 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.query.QueryCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.MappedFieldType;
@@ -60,6 +59,8 @@ import org.elasticsearch.search.internal.ScrollContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchRequest;
 import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.search.profile.Profiler;
+import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
 import org.elasticsearch.search.suggest.SuggestionSearchContext;
@@ -293,7 +294,7 @@ public class TestSearchContext extends SearchContext {
     }
 
     public void setSearcher(Engine.Searcher searcher) {
-        this.searcher = new ContextIndexSearcher(this, searcher);
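+        // ContextIndexSearcher now takes the query cache and caching policy directly instead of the whole search context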
+        this.searcher = new ContextIndexSearcher(searcher, indexService.cache().query(), indexShard.getQueryCachingPolicy());
     }
 
     @Override
@@ -660,8 +661,11 @@ public class TestSearchContext extends SearchContext {
     public void copyContextAndHeadersFrom(HasContextAndHeaders other) {}
 
     @Override
-    public Map<Class<?>, Collector> queryCollectors() {return queryCollectors;}
+    public Profilers getProfilers() {
+        return null; // no profiling
+    }
 
     @Override
-    public QueryCache getQueryCache() { return indexService.cache().query();}
+    public Map<Class<?>, Collector> queryCollectors() {return queryCollectors;}
+
 }
diff --git a/test-framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/VersionUtils.java
rename to test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java b/test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java
rename to test/framework/src/main/java/org/elasticsearch/test/XContentTestUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java
rename to test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
rename to test/framework/src/main/java/org/elasticsearch/test/cluster/NoopClusterService.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java b/test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
rename to test/framework/src/main/java/org/elasticsearch/test/cluster/TestClusterService.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
similarity index 98%
rename from test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
rename to test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
index e549c185616..484f65ea650 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/discovery/ClusterDiscoveryConfiguration.java
@@ -24,12 +24,15 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.network.NetworkUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.InternalTestCluster;
 import org.elasticsearch.test.NodeConfigurationSource;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
+import java.util.Collection;
+import java.util.Collections;
 import java.util.HashSet;
 import java.util.Set;
 
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/BlockClusterStateProcessing.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/IntermittentLongGCDisruption.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/LongGCDisruption.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDelaysPartition.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkDisconnectPartition.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartition.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkPartitionIT.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NetworkUnresponsivePartition.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/ServiceDisruptionScheme.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
rename to test/framework/src/main/java/org/elasticsearch/test/disruption/SlowClusterStateProcessing.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java b/test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/AssertingSearcher.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineFactory.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockEngineSupport.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockInternalEngine.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java b/test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/MockShadowEngine.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java b/test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
rename to test/framework/src/main/java/org/elasticsearch/test/engine/ThrowingLeafReaderWrapper.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
rename to test/framework/src/main/java/org/elasticsearch/test/gateway/NoopGatewayAllocator.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionAssertions.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/CollectionMatchers.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
rename to test/framework/src/main/java/org/elasticsearch/test/hamcrest/RegexMatcher.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java
rename to test/framework/src/main/java/org/elasticsearch/test/junit/annotations/Network.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java b/test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
rename to test/framework/src/main/java/org/elasticsearch/test/junit/annotations/TestLogging.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
rename to test/framework/src/main/java/org/elasticsearch/test/junit/listeners/LoggingListener.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
rename to test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java b/test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
rename to test/framework/src/main/java/org/elasticsearch/test/junit/rule/RepeatOnExceptionRule.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java b/test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcher.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/FakeRestRequest.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/RestTestCandidate.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/Stash.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestPath.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
similarity index 99%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
index 34665efa0f1..f6ce416dbff 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java
@@ -168,6 +168,7 @@ public class HttpRequestBuilder {
             logger.trace("sending request \n{}", stringBuilder.toString());
         }
         for (Map.Entry<String, String> entry : this.headers.entrySet()) {
+            logger.trace("adding header [{} => {}]", entry.getKey(), entry.getValue());
             httpUriRequest.addHeader(entry.getKey(), entry.getValue());
         }
         try (CloseableHttpResponse closeableHttpResponse = httpClient.execute(httpUriRequest)) {
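
The hunk above only adds TRACE logging of each outgoing header. A hypothetical usage sketch follows; the constructor taking a CloseableHttpClient and the fluent setters (host, port, method, path) plus HttpResponse.getBody() are assumptions about the surrounding class, since only addHeader, execute via httpClient, and the new trace message are confirmed by this hunk:

import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.elasticsearch.test.rest.client.http.HttpRequestBuilder;
import org.elasticsearch.test.rest.client.http.HttpResponse;

public class HeaderTraceSketch {
    public static void main(String[] args) throws Exception {
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            // With TRACE enabled for this logger, the header below is now reported
            // as: adding header [Accept => application/json]
            HttpResponse response = new HttpRequestBuilder(httpClient)
                    .host("localhost").port(9200)
                    .method("GET").path("/_cat/shards")
                    .addHeader("Accept", "application/json")
                    .execute();
            System.out.println(response.getBody());
        }
    }
}
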
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/json/JsonPath.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/DoSectionParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanEqualToParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/GreaterThanParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsFalseParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/IsTrueParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LengthParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanOrEqualToParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/LessThanParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/MatchParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestFragmentParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestParseException.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSectionParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParseContext.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/RestTestSuiteParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetSectionParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SetupSectionParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/parser/SkipSectionParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/ApiCallSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/Assertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/ExecutableSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/GreaterThanEqualToAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/IsFalseAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/IsTrueAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LengthAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/LessThanOrEqualToAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/MatchAssertion.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/RestTestSuite.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SetSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SetupSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/SkipSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/section/TestSection.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApi.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestApiParser.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java b/test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/spec/RestSpec.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/support/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/support/Features.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/support/Features.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java b/test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java
rename to test/framework/src/main/java/org/elasticsearch/test/rest/support/FileUtils.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSDirectoryService.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java b/test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
rename to test/framework/src/main/java/org/elasticsearch/test/store/MockFSIndexStore.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
similarity index 92%
rename from test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
rename to test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
index 64cc401cb5f..8b395003576 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/AssertingLocalTransport.java
@@ -23,6 +23,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
@@ -30,7 +31,6 @@ import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportModule;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportResponse;
@@ -51,12 +51,12 @@ public class AssertingLocalTransport extends LocalTransport {
         public String description() {
             return "an asserting transport for testing";
         }
-        public void onModule(TransportModule transportModule) {
-            transportModule.addTransport("mock", AssertingLocalTransport.class);
+        public void onModule(NetworkModule module) {
+            module.registerTransport("mock", AssertingLocalTransport.class);
         }
         @Override
         public Settings additionalSettings() {
-            return Settings.builder().put(TransportModule.TRANSPORT_TYPE_KEY, "mock").build();
+            return Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "mock").build();
         }
     }
 
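
Both transport hunks in this patch move plugin wiring from TransportModule to NetworkModule. A minimal sketch of the resulting registration pattern, reusing AssertingLocalTransport from the hunk above; the plugin class and the "asserting" name are hypothetical, while the NetworkModule calls mirror the diff:

import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.transport.AssertingLocalTransport;

public class AssertingTransportPlugin extends Plugin {
    @Override
    public String name() {
        return "asserting-transport";
    }

    @Override
    public String description() {
        return "selects the asserting local transport";
    }

    // Discovered reflectively by the plugin service; registers the transport under
    // a name that TRANSPORT_TYPE_KEY can then select.
    public void onModule(NetworkModule module) {
        module.registerTransport("asserting", AssertingLocalTransport.class);
    }

    @Override
    public Settings additionalSettings() {
        return Settings.builder().put(NetworkModule.TRANSPORT_TYPE_KEY, "asserting").build();
    }
}
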
diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java b/test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
similarity index 100%
rename from test-framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
rename to test/framework/src/main/java/org/elasticsearch/test/transport/CapturingTransport.java
diff --git a/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
similarity index 98%
rename from test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
rename to test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
index e1efd6c3745..d636341e42f 100644
--- a/test-framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.component.LifecycleListener;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
@@ -38,7 +39,6 @@ import org.elasticsearch.transport.ConnectTransportException;
 import org.elasticsearch.transport.RequestHandlerRegistry;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportException;
-import org.elasticsearch.transport.TransportModule;
 import org.elasticsearch.transport.TransportRequest;
 import org.elasticsearch.transport.TransportRequestOptions;
 import org.elasticsearch.transport.TransportService;
@@ -75,12 +75,12 @@ public class MockTransportService extends TransportService {
         public String description() {
             return "a mock transport service for testing";
         }
-        public void onModule(TransportModule transportModule) {
-            transportModule.addTransportService("mock", MockTransportService.class);
+        public void onModule(NetworkModule module) {
+            module.registerTransportService("mock", MockTransportService.class);
         }
         @Override
         public Settings additionalSettings() {
-            return Settings.builder().put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build();
+            return Settings.builder().put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock").build();
         }
     }
 
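
The MockTransportService hunk above applies the same migration on the transport service side, via registerTransportService and TRANSPORT_SERVICE_TYPE_KEY. A small companion sketch, assuming a node may select both mock implementations at once; the two key names come straight from these hunks:

import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;

public final class MockNetworkSettings {
    // Builds settings selecting both the mock transport and the mock transport
    // service registered by the two plugins in this patch.
    public static Settings mockNetworkSettings() {
        return Settings.builder()
                .put(NetworkModule.TRANSPORT_TYPE_KEY, "mock")
                .put(NetworkModule.TRANSPORT_SERVICE_TYPE_KEY, "mock")
                .build();
    }
}
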
diff --git a/test-framework/src/main/resources/log4j.properties b/test/framework/src/main/resources/log4j.properties
similarity index 100%
rename from test-framework/src/main/resources/log4j.properties
rename to test/framework/src/main/resources/log4j.properties
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/BlacklistedPathPatternMatcherTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/AbstractParserTestCase.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/AssertionParsersTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/DoSectionParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/FileUtilsTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/JsonPathTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserFailingTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestApiParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/RestTestParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/SetSectionParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/SetupSectionParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/rest/test/TestSectionParserTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
similarity index 97%
rename from test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
index 1514e254a7f..af468fa084c 100644
--- a/test-framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
+++ b/test/framework/src/test/java/org/elasticsearch/test/test/InternalTestClusterTests.java
@@ -52,8 +52,8 @@ public class InternalTestClusterTests extends ESTestCase {
         String nodePrefix = randomRealisticUnicodeOfCodepointLengthBetween(1, 10);
 
         Path baseDir = createTempDir();
-        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
-        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
+        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList());
+        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList());
         // TODO: this is not ideal - we should have a way to make sure ports are initialized in the same way
         assertClusters(cluster0, cluster1, false);
 
@@ -110,8 +110,8 @@ public class InternalTestClusterTests extends ESTestCase {
         String nodePrefix = "foobar";
 
         Path baseDir = createTempDir();
-        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
-        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, true);
+        InternalTestCluster cluster0 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName1, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList());
+        InternalTestCluster cluster1 = new InternalTestCluster("local", clusterSeed, baseDir, minNumDataNodes, maxNumDataNodes, clusterName2, nodeConfigurationSource, numClientNodes, enableHttpPipelining, nodePrefix, Collections.emptyList());
 
         assertClusters(cluster0, cluster1, false);
         long seed = randomLong();
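
The InternalTestClusterTests hunk above replaces the constructor's trailing boolean with a collection, shown only as Collections.emptyList(). A hedged sketch of that argument; the element type (plugin classes) is an assumption, since neither the parameter name nor its declared type appears in this hunk:

import java.util.Collection;
import java.util.Collections;

public final class TrailingArgSketch {
    // Assumption: the new trailing argument carries mock plugin classes; both call
    // sites in the hunk exercise only the empty case.
    public static final Collection<Class<?>> NO_MOCK_PLUGINS = Collections.emptyList();
}
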
diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/test/LoggingListenerTests.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java
rename to test/framework/src/test/java/org/elasticsearch/test/test/SuiteScopeClusterIT.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java b/test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java
rename to test/framework/src/test/java/org/elasticsearch/test/test/TestScopeClusterIT.java
diff --git a/test-framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java b/test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
similarity index 100%
rename from test-framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
rename to test/framework/src/test/java/org/elasticsearch/test/test/VersionUtilsTests.java
diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml
similarity index 100%
rename from test-framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml
rename to test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yaml
diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml
similarity index 100%
rename from test-framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml
rename to test/framework/src/test/resources/rest-api-spec/test/suite1/20_another_test.yaml
diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml
similarity index 100%
rename from test-framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml
rename to test/framework/src/test/resources/rest-api-spec/test/suite2/10_basic.yaml
diff --git a/test-framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml b/test/framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml
similarity index 100%
rename from test-framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml
rename to test/framework/src/test/resources/rest-api-spec/test/suite2/15_test2.yaml